Merge branch 'master' into feature/rank-eval

Christoph Büscher 2017-07-14 18:36:08 +02:00
commit 6d999f074a
942 changed files with 20701 additions and 14324 deletions


@@ -347,7 +347,8 @@ These are the Linux flavors the Vagrantfile currently supports:
* ubuntu-1404 aka trusty
* ubuntu-1604 aka xenial
* debian-8 aka jessie, the current debian stable distribution
* debian-8 aka jessie
* debian-9 aka stretch, the current debian stable distribution
* centos-6
* centos-7
* fedora-25
@@ -470,6 +471,29 @@ is tested depends on the branch. On master, this will test against the current
stable branch. On the stable branch, it will test against the latest release
branch. Finally, on a release branch, it will test against the most recent release.
=== BWC Testing against a specific branch
Sometimes a backward compatibility change spans two versions. A common case is new functionality
that needs a BWC bridge in an unreleased version of a release branch (for example, 5.x).
To test the changes, you can instruct Gradle to build the BWC version from a local branch instead of
pulling the release branch from GitHub. You do so using the `tests.bwc.refspec` system property:
-------------------------------------------------
gradle check -Dtests.bwc.refspec=origin/index_req_bwc_5.x
-------------------------------------------------
The branch needs to be available in the local clone that the BWC build makes of the repository you run the
tests from. Using the `origin` remote is a handy trick to make sure that the branch is available
and up to date across multiple runs.
Example:
Say you need to make a change to `master` and have a BWC layer in `5.x`. You will need to:
. Create a branch called `index_req_change` off `master`. This will contain your change.
. Create a branch called `index_req_bwc_5.x` off `5.x`. This will contain your BWC layer.
. If not running the tests locally, push both branches to your remote repository.
. Run the tests with `gradle check -Dtests.bwc.refspec=origin/index_req_bwc_5.x`, as sketched below.
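For illustration, the steps above boil down to roughly the following commands. This is a minimal sketch, assuming a clone whose `origin` remote is the repository you push to, and using the branch names from the example; adapt remotes and branch names to your setup.

-------------------------------------------------
# Create the change branch off master and the BWC layer branch off 5.x.
git checkout -b index_req_change master
git checkout -b index_req_bwc_5.x 5.x

# Push both branches so they are available (and up to date) as origin/*.
git push origin index_req_change index_req_bwc_5.x

# Run the tests, building the BWC version from the local branch.
gradle check -Dtests.bwc.refspec=origin/index_req_bwc_5.x
-------------------------------------------------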
== Coverage analysis
Tests can be run instrumented with jacoco to produce a coverage report in

Vagrantfile

@@ -33,13 +33,17 @@ Vagrant.configure(2) do |config|
[ -f /usr/share/java/jayatanaag.jar ] || install jayatana
SHELL
end
# Wheezy's backports don't contain Openjdk 8 and the backflips required to
# get the sun jdk on there just aren't worth it. We have jessie for testing
# debian and it works fine.
# Wheezy's backports don't contain Openjdk 8 and the backflips
# required to get the sun jdk on there just aren't worth it. We have
# jessie and stretch for testing debian and it works fine.
config.vm.define "debian-8" do |config|
config.vm.box = "elastic/debian-8-x86_64"
deb_common config
end
config.vm.define "debian-9" do |config|
config.vm.box = "elastic/debian-9-x86_64"
deb_common config
end
config.vm.define "centos-6" do |config|
config.vm.box = "elastic/centos-6-x86_64"
rpm_common config


@@ -214,8 +214,9 @@ subprojects {
"org.elasticsearch.gradle:build-tools:${version}": ':build-tools',
"org.elasticsearch:rest-api-spec:${version}": ':rest-api-spec',
"org.elasticsearch:elasticsearch:${version}": ':core',
"org.elasticsearch.client:rest:${version}": ':client:rest',
"org.elasticsearch.client:sniffer:${version}": ':client:sniffer',
"org.elasticsearch.client:elasticsearch-rest-client:${version}": ':client:rest',
"org.elasticsearch.client:elasticsearch-rest-client-sniffer:${version}": ':client:sniffer',
"org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}": ':client:rest-high-level',
"org.elasticsearch.client:test:${version}": ':client:test',
"org.elasticsearch.client:transport:${version}": ':client:transport',
"org.elasticsearch.test:framework:${version}": ':test:framework',


@@ -21,6 +21,7 @@ package org.elasticsearch.gradle.test
import org.apache.tools.ant.DefaultLogger
import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.Version
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.plugin.PluginBuildPlugin
import org.elasticsearch.gradle.plugin.PluginPropertiesExtension
@@ -312,6 +313,9 @@ class ClusterFormationTasks {
// Default the watermarks to absurdly low to prevent the tests from failing on nodes without enough disk space
esConfig['cluster.routing.allocation.disk.watermark.low'] = '1b'
esConfig['cluster.routing.allocation.disk.watermark.high'] = '1b'
if (Version.fromString(node.nodeVersion).major >= 6) {
esConfig['cluster.routing.allocation.disk.watermark.flood_stage'] = '1b'
}
esConfig.putAll(node.config.settings)
Task writeConfig = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup)


@@ -158,7 +158,7 @@ class NodeInfo {
args.add("${property.key.substring('tests.es.'.size())}=${property.value}")
}
}
env.put('ES_JVM_OPTIONS', new File(confDir, 'jvm.options'))
env.put('CONF_DIR', confDir)
if (Version.fromString(nodeVersion).major == 5) {
args.addAll("-E", "path.conf=${confDir}")
} else {


@@ -18,6 +18,7 @@ class VagrantTestPlugin implements Plugin<Project> {
'centos-6',
'centos-7',
'debian-8',
'debian-9',
'fedora-25',
'oel-6',
'oel-7',


@@ -266,7 +266,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]MergePolicyConfig.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]SearchSlowLog.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]AnalysisRegistry.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]CommonGramsTokenFilterFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]CustomAnalyzerProvider.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]ShingleTokenFilterFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]StemmerOverrideTokenFilterFactory.java" checks="LineLength" />
@@ -564,9 +563,7 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexingSlowLogTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]MergePolicySettingsTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]SearchSlowLogTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]PatternCaptureTokenFilterTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]PreBuiltAnalyzerTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]commongrams[/\\]CommonGramsTokenFilterFactoryTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]InternalEngineMergeIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]InternalEngineTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]fielddata[/\\]AbstractFieldDataTestCase.java" checks="LineLength" />


@@ -1,6 +1,6 @@
# When updating elasticsearch, please update 'rest' version in core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy
elasticsearch = 6.0.0-alpha3
lucene = 7.0.0-snapshot-ad2cb77
elasticsearch = 6.0.0-beta1
lucene = 7.0.0-snapshot-00142c9
# optional dependencies
spatial4j = 0.6
@@ -10,10 +10,10 @@ snakeyaml = 1.15
# When updating log4j, please update also docs/java-api/index.asciidoc
log4j = 2.8.2
slf4j = 1.6.2
jna = 4.4.0
jna = 4.4.0-1
# test dependencies
randomizedrunner = 2.5.0
randomizedrunner = 2.5.2
junit = 4.12
httpclient = 4.5.2
# When updating httpcore, please also update core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy


@@ -53,7 +53,7 @@ task test(type: Test, overwrite: true)
dependencies {
compile 'org.apache.commons:commons-math3:3.2'
compile("org.elasticsearch.client:rest:${version}")
compile("org.elasticsearch.client:elasticsearch-rest-client:${version}")
// bottleneck should be the client, not Elasticsearch
compile project(path: ':client:client-benchmark-noop-api-plugin')
// for transport client


@@ -53,6 +53,6 @@ public class TransportNoopSearchAction extends HandledTransportAction<SearchRequ
new SearchHit[0], 0L, 0.0f),
new InternalAggregations(Collections.emptyList()),
new Suggest(Collections.emptyList()),
new SearchProfileShardResults(Collections.emptyMap()), false, false, 1), "", 1, 1, 0, new ShardSearchFailure[0]));
new SearchProfileShardResults(Collections.emptyMap()), false, false, 1), "", 1, 1, 0, 0, new ShardSearchFailure[0]));
}
}


@@ -20,12 +20,23 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks
*/
apply plugin: 'elasticsearch.build'
apply plugin: 'elasticsearch.rest-test'
apply plugin: 'nebula.maven-base-publish'
apply plugin: 'nebula.maven-scm'
group = 'org.elasticsearch.client'
archivesBaseName = 'elasticsearch-rest-high-level-client'
publishing {
publications {
nebula {
artifactId = archivesBaseName
}
}
}
dependencies {
compile "org.elasticsearch:elasticsearch:${version}"
compile "org.elasticsearch.client:rest:${version}"
compile "org.elasticsearch.client:elasticsearch-rest-client:${version}"
compile "org.elasticsearch.plugin:parent-join-client:${version}"
compile "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}"


@@ -56,9 +56,9 @@ import org.elasticsearch.search.aggregations.Aggregation;
import org.elasticsearch.search.aggregations.bucket.adjacency.AdjacencyMatrixAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.adjacency.ParsedAdjacencyMatrix;
import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.filter.ParsedFilter;
import org.elasticsearch.search.aggregations.bucket.filters.FiltersAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.filters.ParsedFilters;
import org.elasticsearch.search.aggregations.bucket.filter.ParsedFilters;
import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoHashGrid;
import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder;
@@ -73,12 +73,12 @@ import org.elasticsearch.search.aggregations.bucket.nested.NestedAggregationBuil
import org.elasticsearch.search.aggregations.bucket.nested.ParsedNested;
import org.elasticsearch.search.aggregations.bucket.nested.ParsedReverseNested;
import org.elasticsearch.search.aggregations.bucket.nested.ReverseNestedAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.range.DateRangeAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.range.GeoDistanceAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.range.ParsedDateRange;
import org.elasticsearch.search.aggregations.bucket.range.ParsedGeoDistance;
import org.elasticsearch.search.aggregations.bucket.range.ParsedRange;
import org.elasticsearch.search.aggregations.bucket.range.RangeAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.range.date.DateRangeAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.range.date.ParsedDateRange;
import org.elasticsearch.search.aggregations.bucket.range.geodistance.GeoDistanceAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.range.geodistance.ParsedGeoDistance;
import org.elasticsearch.search.aggregations.bucket.sampler.InternalSampler;
import org.elasticsearch.search.aggregations.bucket.sampler.ParsedSampler;
import org.elasticsearch.search.aggregations.bucket.significant.ParsedSignificantLongTerms;


@@ -153,7 +153,7 @@ public class RestHighLevelClientTests extends ESTestCase {
public void testSearchScroll() throws IOException {
Header[] headers = randomHeaders(random(), "Header");
SearchResponse mockSearchResponse = new SearchResponse(new SearchResponseSections(SearchHits.empty(), InternalAggregations.EMPTY,
null, false, false, null, 1), randomAlphaOfLengthBetween(5, 10), 5, 5, 100, new ShardSearchFailure[0]);
null, false, false, null, 1), randomAlphaOfLengthBetween(5, 10), 5, 5, 0, 100, new ShardSearchFailure[0]);
mockResponse(mockSearchResponse);
SearchResponse searchResponse = restHighLevelClient.searchScroll(new SearchScrollRequest(randomAlphaOfLengthBetween(5, 10)),
headers);


@@ -269,7 +269,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
assertEquals(5, matrixStats.getFieldCount("num2"));
assertEquals(29d, matrixStats.getMean("num2"), 0d);
assertEquals(330d, matrixStats.getVariance("num2"), 0d);
assertEquals(-0.13568039346585542, matrixStats.getSkewness("num2"), 0d);
assertEquals(-0.13568039346585542, matrixStats.getSkewness("num2"), 1.0e-16);
assertEquals(1.3517561983471074, matrixStats.getKurtosis("num2"), 0d);
assertEquals(-767.5, matrixStats.getCovariance("num", "num2"), 0d);
assertEquals(-0.9876336291667923, matrixStats.getCorrelation("num", "num2"), 0d);


@@ -0,0 +1,958 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.documentation;
import org.apache.http.HttpEntity;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.ContentType;
import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.replication.ReplicationResponse;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.get.GetResult;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import static java.util.Collections.emptyMap;
import static java.util.Collections.singletonMap;
/**
* This class is used to generate the Java CRUD API documentation.
* You need to wrap your code between two tags like:
* // tag::example[]
* // end::example[]
*
* Where example is your tag name.
*
* Then in the documentation, you can extract what is between tag and end tags with
* ["source","java",subs="attributes,callouts,macros"]
* --------------------------------------------------
* include-tagged::{doc-tests}/CRUDDocumentationIT.java[example]
* --------------------------------------------------
*/
public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
public void testIndex() throws IOException {
RestHighLevelClient client = highLevelClient();
{
//tag::index-request-map
Map<String, Object> jsonMap = new HashMap<>();
jsonMap.put("user", "kimchy");
jsonMap.put("postDate", new Date());
jsonMap.put("message", "trying out Elasticsearch");
IndexRequest indexRequest = new IndexRequest("posts", "doc", "1")
.source(jsonMap); // <1>
//end::index-request-map
IndexResponse indexResponse = client.index(indexRequest);
assertEquals(indexResponse.getResult(), DocWriteResponse.Result.CREATED);
}
{
//tag::index-request-xcontent
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.startObject();
{
builder.field("user", "kimchy");
builder.field("postDate", new Date());
builder.field("message", "trying out Elasticsearch");
}
builder.endObject();
IndexRequest indexRequest = new IndexRequest("posts", "doc", "1")
.source(builder); // <1>
//end::index-request-xcontent
IndexResponse indexResponse = client.index(indexRequest);
assertEquals(indexResponse.getResult(), DocWriteResponse.Result.UPDATED);
}
{
//tag::index-request-shortcut
IndexRequest indexRequest = new IndexRequest("posts", "doc", "1")
.source("user", "kimchy",
"postDate", new Date(),
"message", "trying out Elasticsearch"); // <1>
//end::index-request-shortcut
IndexResponse indexResponse = client.index(indexRequest);
assertEquals(indexResponse.getResult(), DocWriteResponse.Result.UPDATED);
}
{
//tag::index-request-string
IndexRequest request = new IndexRequest(
"posts", // <1>
"doc", // <2>
"1"); // <3>
String jsonString = "{" +
"\"user\":\"kimchy\"," +
"\"postDate\":\"2013-01-30\"," +
"\"message\":\"trying out Elasticsearch\"" +
"}";
request.source(jsonString, XContentType.JSON); // <4>
//end::index-request-string
// tag::index-execute
IndexResponse indexResponse = client.index(request);
// end::index-execute
assertEquals(indexResponse.getResult(), DocWriteResponse.Result.UPDATED);
// tag::index-response
String index = indexResponse.getIndex();
String type = indexResponse.getType();
String id = indexResponse.getId();
long version = indexResponse.getVersion();
if (indexResponse.getResult() == DocWriteResponse.Result.CREATED) {
// <1>
} else if (indexResponse.getResult() == DocWriteResponse.Result.UPDATED) {
// <2>
}
ReplicationResponse.ShardInfo shardInfo = indexResponse.getShardInfo();
if (shardInfo.getTotal() != shardInfo.getSuccessful()) {
// <3>
}
if (shardInfo.getFailed() > 0) {
for (ReplicationResponse.ShardInfo.Failure failure : shardInfo.getFailures()) {
String reason = failure.reason(); // <4>
}
}
// end::index-response
// tag::index-execute-async
client.indexAsync(request, new ActionListener<IndexResponse>() {
@Override
public void onResponse(IndexResponse indexResponse) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
});
// end::index-execute-async
}
{
IndexRequest request = new IndexRequest("posts", "doc", "1");
// tag::index-request-routing
request.routing("routing"); // <1>
// end::index-request-routing
// tag::index-request-parent
request.parent("parent"); // <1>
// end::index-request-parent
// tag::index-request-timeout
request.timeout(TimeValue.timeValueSeconds(1)); // <1>
request.timeout("1s"); // <2>
// end::index-request-timeout
// tag::index-request-refresh
request.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL); // <1>
request.setRefreshPolicy("wait_for"); // <2>
// end::index-request-refresh
// tag::index-request-version
request.version(2); // <1>
// end::index-request-version
// tag::index-request-version-type
request.versionType(VersionType.EXTERNAL); // <1>
// end::index-request-version-type
// tag::index-request-op-type
request.opType(DocWriteRequest.OpType.CREATE); // <1>
request.opType("create"); // <2>
// end::index-request-op-type
// tag::index-request-pipeline
request.setPipeline("pipeline"); // <1>
// end::index-request-pipeline
}
{
// tag::index-conflict
IndexRequest request = new IndexRequest("posts", "doc", "1")
.source("field", "value")
.version(1);
try {
IndexResponse response = client.index(request);
} catch(ElasticsearchException e) {
if (e.status() == RestStatus.CONFLICT) {
// <1>
}
}
// end::index-conflict
}
{
// tag::index-optype
IndexRequest request = new IndexRequest("posts", "doc", "1")
.source("field", "value")
.opType(DocWriteRequest.OpType.CREATE);
try {
IndexResponse response = client.index(request);
} catch(ElasticsearchException e) {
if (e.status() == RestStatus.CONFLICT) {
// <1>
}
}
// end::index-optype
}
}
public void testUpdate() throws IOException {
RestHighLevelClient client = highLevelClient();
{
IndexRequest indexRequest = new IndexRequest("posts", "doc", "1").source("field", 0);
IndexResponse indexResponse = client.index(indexRequest);
assertSame(indexResponse.status(), RestStatus.CREATED);
XContentType xContentType = XContentType.JSON;
String script = XContentBuilder.builder(xContentType.xContent())
.startObject()
.startObject("script")
.field("lang", "painless")
.field("code", "ctx._source.field += params.count")
.endObject()
.endObject().string();
HttpEntity body = new NStringEntity(script, ContentType.create(xContentType.mediaType()));
Response response = client().performRequest(HttpPost.METHOD_NAME, "/_scripts/increment-field", emptyMap(), body);
assertEquals(response.getStatusLine().getStatusCode(), RestStatus.OK.getStatus());
}
{
//tag::update-request
UpdateRequest request = new UpdateRequest(
"posts", // <1>
"doc", // <2>
"1"); // <3>
//end::update-request
request.fetchSource(true);
//tag::update-request-with-inline-script
Map<String, Object> parameters = singletonMap("count", 4); // <1>
Script inline = new Script(ScriptType.INLINE, "painless", "ctx._source.field += params.count", parameters); // <2>
request.script(inline); // <3>
//end::update-request-with-inline-script
UpdateResponse updateResponse = client.update(request);
assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED);
assertEquals(4, updateResponse.getGetResult().getSource().get("field"));
request = new UpdateRequest("posts", "doc", "1").fetchSource(true);
//tag::update-request-with-stored-script
Script stored =
new Script(ScriptType.STORED, null, "increment-field", parameters); // <1>
request.script(stored); // <2>
//end::update-request-with-stored-script
updateResponse = client.update(request);
assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED);
assertEquals(8, updateResponse.getGetResult().getSource().get("field"));
}
{
//tag::update-request-with-doc-as-map
Map<String, Object> jsonMap = new HashMap<>();
jsonMap.put("updated", new Date());
jsonMap.put("reason", "daily update");
UpdateRequest request = new UpdateRequest("posts", "doc", "1")
.doc(jsonMap); // <1>
//end::update-request-with-doc-as-map
UpdateResponse updateResponse = client.update(request);
assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED);
}
{
//tag::update-request-with-doc-as-xcontent
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.startObject();
{
builder.field("updated", new Date());
builder.field("reason", "daily update");
}
builder.endObject();
UpdateRequest request = new UpdateRequest("posts", "doc", "1")
.doc(builder); // <1>
//end::update-request-with-doc-as-xcontent
UpdateResponse updateResponse = client.update(request);
assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED);
}
{
//tag::update-request-shortcut
UpdateRequest request = new UpdateRequest("posts", "doc", "1")
.doc("updated", new Date(),
"reason", "daily update"); // <1>
//end::update-request-shortcut
UpdateResponse updateResponse = client.update(request);
assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED);
}
{
//tag::update-request-with-doc-as-string
UpdateRequest request = new UpdateRequest("posts", "doc", "1");
String jsonString = "{" +
"\"updated\":\"2017-01-01\"," +
"\"reason\":\"daily update\"" +
"}";
request.doc(jsonString, XContentType.JSON); // <1>
//end::update-request-with-doc-as-string
request.fetchSource(true);
// tag::update-execute
UpdateResponse updateResponse = client.update(request);
// end::update-execute
assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED);
// tag::update-response
String index = updateResponse.getIndex();
String type = updateResponse.getType();
String id = updateResponse.getId();
long version = updateResponse.getVersion();
if (updateResponse.getResult() == DocWriteResponse.Result.CREATED) {
// <1>
} else if (updateResponse.getResult() == DocWriteResponse.Result.UPDATED) {
// <2>
} else if (updateResponse.getResult() == DocWriteResponse.Result.DELETED) {
// <3>
} else if (updateResponse.getResult() == DocWriteResponse.Result.NOOP) {
// <4>
}
// end::update-response
// tag::update-getresult
GetResult result = updateResponse.getGetResult(); // <1>
if (result.isExists()) {
String sourceAsString = result.sourceAsString(); // <2>
Map<String, Object> sourceAsMap = result.sourceAsMap(); // <3>
byte[] sourceAsBytes = result.source(); // <4>
} else {
// <5>
}
// end::update-getresult
assertNotNull(result);
assertEquals(3, result.sourceAsMap().size());
// tag::update-failure
ReplicationResponse.ShardInfo shardInfo = updateResponse.getShardInfo();
if (shardInfo.getTotal() != shardInfo.getSuccessful()) {
// <1>
}
if (shardInfo.getFailed() > 0) {
for (ReplicationResponse.ShardInfo.Failure failure : shardInfo.getFailures()) {
String reason = failure.reason(); // <2>
}
}
// end::update-failure
// tag::update-execute-async
client.updateAsync(request, new ActionListener<UpdateResponse>() {
@Override
public void onResponse(UpdateResponse updateResponse) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
});
// end::update-execute-async
}
{
//tag::update-docnotfound
UpdateRequest request = new UpdateRequest("posts", "type", "does_not_exist").doc("field", "value");
try {
UpdateResponse updateResponse = client.update(request);
} catch (ElasticsearchException e) {
if (e.status() == RestStatus.NOT_FOUND) {
// <1>
}
}
//end::update-docnotfound
}
{
// tag::update-conflict
UpdateRequest request = new UpdateRequest("posts", "doc", "1")
.doc("field", "value")
.version(1);
try {
UpdateResponse updateResponse = client.update(request);
} catch(ElasticsearchException e) {
if (e.status() == RestStatus.CONFLICT) {
// <1>
}
}
// end::update-conflict
}
{
UpdateRequest request = new UpdateRequest("posts", "doc", "1").doc("reason", "no source");
//tag::update-request-no-source
request.fetchSource(true); // <1>
//end::update-request-no-source
UpdateResponse updateResponse = client.update(request);
assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED);
assertNotNull(updateResponse.getGetResult());
assertEquals(3, updateResponse.getGetResult().sourceAsMap().size());
}
{
UpdateRequest request = new UpdateRequest("posts", "doc", "1").doc("reason", "source includes");
//tag::update-request-source-include
String[] includes = new String[]{"updated", "r*"};
String[] excludes = Strings.EMPTY_ARRAY;
request.fetchSource(new FetchSourceContext(true, includes, excludes)); // <1>
//end::update-request-source-include
UpdateResponse updateResponse = client.update(request);
assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED);
Map<String, Object> sourceAsMap = updateResponse.getGetResult().sourceAsMap();
assertEquals(2, sourceAsMap.size());
assertEquals("source includes", sourceAsMap.get("reason"));
assertTrue(sourceAsMap.containsKey("updated"));
}
{
UpdateRequest request = new UpdateRequest("posts", "doc", "1").doc("reason", "source excludes");
//tag::update-request-source-exclude
String[] includes = Strings.EMPTY_ARRAY;
String[] excludes = new String[]{"updated"};
request.fetchSource(new FetchSourceContext(true, includes, excludes)); // <1>
//end::update-request-source-exclude
UpdateResponse updateResponse = client.update(request);
assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED);
Map<String, Object> sourceAsMap = updateResponse.getGetResult().sourceAsMap();
assertEquals(2, sourceAsMap.size());
assertEquals("source excludes", sourceAsMap.get("reason"));
assertTrue(sourceAsMap.containsKey("field"));
}
{
UpdateRequest request = new UpdateRequest("posts", "doc", "id");
// tag::update-request-routing
request.routing("routing"); // <1>
// end::update-request-routing
// tag::update-request-parent
request.parent("parent"); // <1>
// end::update-request-parent
// tag::update-request-timeout
request.timeout(TimeValue.timeValueSeconds(1)); // <1>
request.timeout("1s"); // <2>
// end::update-request-timeout
// tag::update-request-retry
request.retryOnConflict(3); // <1>
// end::update-request-retry
// tag::update-request-refresh
request.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL); // <1>
request.setRefreshPolicy("wait_for"); // <2>
// end::update-request-refresh
// tag::update-request-version
request.version(2); // <1>
// end::update-request-version
// tag::update-request-detect-noop
request.detectNoop(false); // <1>
// end::update-request-detect-noop
// tag::update-request-upsert
String jsonString = "{\"created\":\"2017-01-01\"}";
request.upsert(jsonString, XContentType.JSON); // <1>
// end::update-request-upsert
// tag::update-request-scripted-upsert
request.scriptedUpsert(true); // <1>
// end::update-request-scripted-upsert
// tag::update-request-doc-upsert
request.docAsUpsert(true); // <1>
// end::update-request-doc-upsert
// tag::update-request-active-shards
request.waitForActiveShards(2); // <1>
request.waitForActiveShards(ActiveShardCount.ALL); // <2>
// end::update-request-active-shards
}
}
public void testDelete() throws IOException {
RestHighLevelClient client = highLevelClient();
{
IndexRequest indexRequest = new IndexRequest("posts", "doc", "1").source("field", "value");
IndexResponse indexResponse = client.index(indexRequest);
assertSame(indexResponse.status(), RestStatus.CREATED);
}
{
// tag::delete-request
DeleteRequest request = new DeleteRequest(
"posts", // <1>
"doc", // <2>
"1"); // <3>
// end::delete-request
// tag::delete-execute
DeleteResponse deleteResponse = client.delete(request);
// end::delete-execute
assertSame(deleteResponse.getResult(), DocWriteResponse.Result.DELETED);
// tag::delete-response
String index = deleteResponse.getIndex();
String type = deleteResponse.getType();
String id = deleteResponse.getId();
long version = deleteResponse.getVersion();
ReplicationResponse.ShardInfo shardInfo = deleteResponse.getShardInfo();
if (shardInfo.getTotal() != shardInfo.getSuccessful()) {
// <1>
}
if (shardInfo.getFailed() > 0) {
for (ReplicationResponse.ShardInfo.Failure failure : shardInfo.getFailures()) {
String reason = failure.reason(); // <2>
}
}
// end::delete-response
// tag::delete-execute-async
client.deleteAsync(request, new ActionListener<DeleteResponse>() {
@Override
public void onResponse(DeleteResponse deleteResponse) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
});
// end::delete-execute-async
}
{
DeleteRequest request = new DeleteRequest("posts", "doc", "1");
// tag::delete-request-routing
request.routing("routing"); // <1>
// end::delete-request-routing
// tag::delete-request-parent
request.parent("parent"); // <1>
// end::delete-request-parent
// tag::delete-request-timeout
request.timeout(TimeValue.timeValueMinutes(2)); // <1>
request.timeout("2m"); // <2>
// end::delete-request-timeout
// tag::delete-request-refresh
request.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL); // <1>
request.setRefreshPolicy("wait_for"); // <2>
// end::delete-request-refresh
// tag::delete-request-version
request.version(2); // <1>
// end::delete-request-version
// tag::delete-request-version-type
request.versionType(VersionType.EXTERNAL); // <1>
// end::delete-request-version-type
}
{
// tag::delete-notfound
DeleteRequest request = new DeleteRequest("posts", "doc", "does_not_exist");
DeleteResponse deleteResponse = client.delete(request);
if (deleteResponse.getResult() == DocWriteResponse.Result.NOT_FOUND) {
// <1>
}
// end::delete-notfound
}
{
IndexResponse indexResponse = client.index(new IndexRequest("posts", "doc", "1").source("field", "value"));
assertSame(indexResponse.status(), RestStatus.CREATED);
// tag::delete-conflict
try {
DeleteRequest request = new DeleteRequest("posts", "doc", "1").version(2);
DeleteResponse deleteResponse = client.delete(request);
} catch (ElasticsearchException exception) {
if (exception.status() == RestStatus.CONFLICT) {
// <1>
}
}
// end::delete-conflict
}
}
public void testBulk() throws IOException {
RestHighLevelClient client = highLevelClient();
{
// tag::bulk-request
BulkRequest request = new BulkRequest(); // <1>
request.add(new IndexRequest("posts", "doc", "1") // <2>
.source(XContentType.JSON,"field", "foo"));
request.add(new IndexRequest("posts", "doc", "2") // <3>
.source(XContentType.JSON,"field", "bar"));
request.add(new IndexRequest("posts", "doc", "3") // <4>
.source(XContentType.JSON,"field", "baz"));
// end::bulk-request
// tag::bulk-execute
BulkResponse bulkResponse = client.bulk(request);
// end::bulk-execute
assertSame(bulkResponse.status(), RestStatus.OK);
assertFalse(bulkResponse.hasFailures());
}
{
// tag::bulk-request-with-mixed-operations
BulkRequest request = new BulkRequest();
request.add(new DeleteRequest("posts", "doc", "3")); // <1>
request.add(new UpdateRequest("posts", "doc", "2") // <2>
.doc(XContentType.JSON,"other", "test"));
request.add(new IndexRequest("posts", "doc", "4") // <3>
.source(XContentType.JSON,"field", "baz"));
// end::bulk-request-with-mixed-operations
BulkResponse bulkResponse = client.bulk(request);
assertSame(bulkResponse.status(), RestStatus.OK);
assertFalse(bulkResponse.hasFailures());
// tag::bulk-response
for (BulkItemResponse bulkItemResponse : bulkResponse) { // <1>
DocWriteResponse itemResponse = bulkItemResponse.getResponse(); // <2>
if (bulkItemResponse.getOpType() == DocWriteRequest.OpType.INDEX
|| bulkItemResponse.getOpType() == DocWriteRequest.OpType.CREATE) { // <3>
IndexResponse indexResponse = (IndexResponse) itemResponse;
} else if (bulkItemResponse.getOpType() == DocWriteRequest.OpType.UPDATE) { // <4>
UpdateResponse updateResponse = (UpdateResponse) itemResponse;
} else if (bulkItemResponse.getOpType() == DocWriteRequest.OpType.DELETE) { // <5>
DeleteResponse deleteResponse = (DeleteResponse) itemResponse;
}
}
// end::bulk-response
// tag::bulk-has-failures
if (bulkResponse.hasFailures()) { // <1>
}
// end::bulk-has-failures
// tag::bulk-errors
for (BulkItemResponse bulkItemResponse : bulkResponse) {
if (bulkItemResponse.isFailed()) { // <1>
BulkItemResponse.Failure failure = bulkItemResponse.getFailure(); // <2>
}
}
// end::bulk-errors
}
{
BulkRequest request = new BulkRequest();
// tag::bulk-request-timeout
request.timeout(TimeValue.timeValueMinutes(2)); // <1>
request.timeout("2m"); // <2>
// end::bulk-request-timeout
// tag::bulk-request-refresh
request.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL); // <1>
request.setRefreshPolicy("wait_for"); // <2>
// end::bulk-request-refresh
// tag::bulk-request-active-shards
request.waitForActiveShards(2); // <1>
request.waitForActiveShards(ActiveShardCount.ALL); // <2>
// end::bulk-request-active-shards
// tag::bulk-execute-async
client.bulkAsync(request, new ActionListener<BulkResponse>() {
@Override
public void onResponse(BulkResponse bulkResponse) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
});
// end::bulk-execute-async
}
}
public void testGet() throws IOException {
RestHighLevelClient client = highLevelClient();
{
String mappings = "{\n" +
" \"mappings\" : {\n" +
" \"doc\" : {\n" +
" \"properties\" : {\n" +
" \"message\" : {\n" +
" \"type\": \"text\",\n" +
" \"store\": true\n" +
" }\n" +
" }\n" +
" }\n" +
" }\n" +
"}";
NStringEntity entity = new NStringEntity(mappings, ContentType.APPLICATION_JSON);
Response response = client().performRequest("PUT", "/posts", Collections.emptyMap(), entity);
assertEquals(200, response.getStatusLine().getStatusCode());
IndexRequest indexRequest = new IndexRequest("posts", "doc", "1")
.source("user", "kimchy",
"postDate", new Date(),
"message", "trying out Elasticsearch");
IndexResponse indexResponse = client.index(indexRequest);
assertEquals(indexResponse.getResult(), DocWriteResponse.Result.CREATED);
}
{
//tag::get-request
GetRequest getRequest = new GetRequest(
"posts", // <1>
"doc", // <2>
"1"); // <3>
//end::get-request
//tag::get-execute
GetResponse getResponse = client.get(getRequest);
//end::get-execute
assertTrue(getResponse.isExists());
assertEquals(3, getResponse.getSourceAsMap().size());
//tag::get-response
String index = getResponse.getIndex();
String type = getResponse.getType();
String id = getResponse.getId();
if (getResponse.isExists()) {
long version = getResponse.getVersion();
String sourceAsString = getResponse.getSourceAsString(); // <1>
Map<String, Object> sourceAsMap = getResponse.getSourceAsMap(); // <2>
byte[] sourceAsBytes = getResponse.getSourceAsBytes(); // <3>
} else {
// <4>
}
//end::get-response
}
{
GetRequest request = new GetRequest("posts", "doc", "1");
//tag::get-request-no-source
request.fetchSourceContext(new FetchSourceContext(false)); // <1>
//end::get-request-no-source
GetResponse getResponse = client.get(request);
assertNull(getResponse.getSourceInternal());
}
{
GetRequest request = new GetRequest("posts", "doc", "1");
//tag::get-request-source-include
String[] includes = new String[]{"message", "*Date"};
String[] excludes = Strings.EMPTY_ARRAY;
FetchSourceContext fetchSourceContext = new FetchSourceContext(true, includes, excludes);
request.fetchSourceContext(fetchSourceContext); // <1>
//end::get-request-source-include
GetResponse getResponse = client.get(request);
Map<String, Object> sourceAsMap = getResponse.getSourceAsMap();
assertEquals(2, sourceAsMap.size());
assertEquals("trying out Elasticsearch", sourceAsMap.get("message"));
assertTrue(sourceAsMap.containsKey("postDate"));
}
{
GetRequest request = new GetRequest("posts", "doc", "1");
//tag::get-request-source-exclude
String[] includes = Strings.EMPTY_ARRAY;
String[] excludes = new String[]{"message"};
FetchSourceContext fetchSourceContext = new FetchSourceContext(true, includes, excludes);
request.fetchSourceContext(fetchSourceContext); // <1>
//end::get-request-source-exclude
GetResponse getResponse = client.get(request);
Map<String, Object> sourceAsMap = getResponse.getSourceAsMap();
assertEquals(2, sourceAsMap.size());
assertEquals("kimchy", sourceAsMap.get("user"));
assertTrue(sourceAsMap.containsKey("postDate"));
}
{
GetRequest request = new GetRequest("posts", "doc", "1");
//tag::get-request-stored
request.storedFields("message"); // <1>
GetResponse getResponse = client.get(request);
String message = getResponse.getField("message").getValue(); // <2>
//end::get-request-stored
assertEquals("trying out Elasticsearch", message);
assertEquals(1, getResponse.getFields().size());
assertNull(getResponse.getSourceInternal());
}
{
GetRequest request = new GetRequest("posts", "doc", "1");
//tag::get-request-routing
request.routing("routing"); // <1>
//end::get-request-routing
//tag::get-request-parent
request.parent("parent"); // <1>
//end::get-request-parent
//tag::get-request-preference
request.preference("preference"); // <1>
//end::get-request-preference
//tag::get-request-realtime
request.realtime(false); // <1>
//end::get-request-realtime
//tag::get-request-refresh
request.refresh(true); // <1>
//end::get-request-refresh
//tag::get-request-version
request.version(2); // <1>
//end::get-request-version
//tag::get-request-version-type
request.versionType(VersionType.EXTERNAL); // <1>
//end::get-request-version-type
}
{
GetRequest request = new GetRequest("posts", "doc", "1");
//tag::get-execute-async
client.getAsync(request, new ActionListener<GetResponse>() {
@Override
public void onResponse(GetResponse getResponse) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
});
//end::get-execute-async
}
{
//tag::get-indexnotfound
GetRequest request = new GetRequest("does_not_exist", "doc", "1");
try {
GetResponse getResponse = client.get(request);
} catch (ElasticsearchException e) {
if (e.status() == RestStatus.NOT_FOUND) {
// <1>
}
}
//end::get-indexnotfound
}
{
// tag::get-conflict
try {
GetRequest request = new GetRequest("posts", "doc", "1").version(2);
GetResponse getResponse = client.get(request);
} catch (ElasticsearchException exception) {
if (exception.status() == RestStatus.CONFLICT) {
// <1>
}
}
// end::get-conflict
}
}
public void testBulkProcessor() throws InterruptedException, IOException {
Settings settings = Settings.builder().put("node.name", "my-application").build();
RestHighLevelClient client = highLevelClient();
{
// tag::bulk-processor-init
ThreadPool threadPool = new ThreadPool(settings); // <1>
BulkProcessor.Listener listener = new BulkProcessor.Listener() { // <2>
@Override
public void beforeBulk(long executionId, BulkRequest request) {
// <3>
}
@Override
public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
// <4>
}
@Override
public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
// <5>
}
};
BulkProcessor bulkProcessor = new BulkProcessor.Builder(client::bulkAsync, listener, threadPool)
.build(); // <6>
// end::bulk-processor-init
assertNotNull(bulkProcessor);
// tag::bulk-processor-add
IndexRequest one = new IndexRequest("posts", "doc", "1").
source(XContentType.JSON, "title", "In which order are my Elasticsearch queries executed?");
IndexRequest two = new IndexRequest("posts", "doc", "2")
.source(XContentType.JSON, "title", "Current status and upcoming changes in Elasticsearch");
IndexRequest three = new IndexRequest("posts", "doc", "3")
.source(XContentType.JSON, "title", "The Future of Federated Search in Elasticsearch");
bulkProcessor.add(one);
bulkProcessor.add(two);
bulkProcessor.add(three);
// end::bulk-processor-add
// tag::bulk-processor-await
boolean terminated = bulkProcessor.awaitClose(30L, TimeUnit.SECONDS); // <1>
// end::bulk-processor-await
assertTrue(terminated);
// tag::bulk-processor-close
bulkProcessor.close();
// end::bulk-processor-close
terminate(threadPool);
}
{
// tag::bulk-processor-listener
BulkProcessor.Listener listener = new BulkProcessor.Listener() {
@Override
public void beforeBulk(long executionId, BulkRequest request) {
int numberOfActions = request.numberOfActions(); // <1>
logger.debug("Executing bulk [{}] with {} requests", executionId, numberOfActions);
}
@Override
public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
if (response.hasFailures()) { // <2>
logger.warn("Bulk [{}] executed with failures", executionId);
} else {
logger.debug("Bulk [{}] completed in {} milliseconds", executionId, response.getTook().getMillis());
}
}
@Override
public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
logger.error("Failed to execute bulk", failure); // <3>
}
};
// end::bulk-processor-listener
ThreadPool threadPool = new ThreadPool(settings);
try {
// tag::bulk-processor-options
BulkProcessor.Builder builder = new BulkProcessor.Builder(client::bulkAsync, listener, threadPool);
builder.setBulkActions(500); // <1>
builder.setBulkSize(new ByteSizeValue(1L, ByteSizeUnit.MB)); // <2>
builder.setConcurrentRequests(0); // <3>
builder.setFlushInterval(TimeValue.timeValueSeconds(10L)); // <4>
builder.setBackoffPolicy(BackoffPolicy.constantBackoff(TimeValue.timeValueSeconds(1L), 3)); // <5>
// end::bulk-processor-options
} finally {
terminate(threadPool);
}
}
}
}


@@ -1,112 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.documentation;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
/**
* This class is used to generate the Java Delete API documentation.
* You need to wrap your code between two tags like:
* // tag::example[]
* // end::example[]
*
* Where example is your tag name.
*
* Then in the documentation, you can extract what is between tag and end tags with
* ["source","java",subs="attributes,callouts"]
* --------------------------------------------------
* sys2::[perl -ne 'exit if /end::example/; print if $tag; $tag = $tag || /tag::example/' \
* {docdir}/../../client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DeleteDocumentationIT.java]
* --------------------------------------------------
*/
public class DeleteDocumentationIT extends ESRestHighLevelClientTestCase {
/**
* This test documents docs/java-rest/high-level/document/delete.asciidoc
*/
public void testDelete() throws IOException {
RestHighLevelClient client = highLevelClient();
// tag::delete-request
DeleteRequest request = new DeleteRequest(
"index", // <1>
"type", // <2>
"id"); // <3>
// end::delete-request
// tag::delete-request-props
request.timeout(TimeValue.timeValueSeconds(1)); // <1>
request.timeout("1s"); // <2>
request.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL); // <3>
request.setRefreshPolicy("wait_for"); // <4>
request.version(2); // <5>
request.versionType(VersionType.EXTERNAL); // <6>
// end::delete-request-props
// tag::delete-execute
DeleteResponse response = client.delete(request);
// end::delete-execute
try {
// tag::delete-notfound
if (response.getResult().equals(DocWriteResponse.Result.NOT_FOUND)) {
throw new Exception("Can't find document to be removed"); // <1>
}
// end::delete-notfound
} catch (Exception ignored) { }
// tag::delete-execute-async
client.deleteAsync(request, new ActionListener<DeleteResponse>() {
@Override
public void onResponse(DeleteResponse deleteResponse) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
});
// end::delete-execute-async
// tag::delete-conflict
try {
client.delete(request);
} catch (ElasticsearchException exception) {
if (exception.status().equals(RestStatus.CONFLICT)) {
// <1>
}
}
// end::delete-conflict
}
}


@@ -28,6 +28,7 @@ import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.index.query.GeoShapeQueryBuilder;
import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder.FilterFunctionBuilder;
import org.elasticsearch.join.query.JoinQueryBuilders;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.test.ESTestCase;
@@ -77,8 +78,6 @@ import static org.elasticsearch.index.query.QueryBuilders.typeQuery;
import static org.elasticsearch.index.query.QueryBuilders.wildcardQuery;
import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.exponentialDecayFunction;
import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.randomFunction;
import static org.elasticsearch.join.query.JoinQueryBuilders.hasChildQuery;
import static org.elasticsearch.join.query.JoinQueryBuilders.hasParentQuery;
/**
* Examples of using the transport client that are imported into the transport client documentation.
@@ -218,7 +217,7 @@ public class QueryDSLDocumentationTests extends ESTestCase {
public void testHasChild() {
// tag::has_child
hasChildQuery(
JoinQueryBuilders.hasChildQuery(
"blog_tag", // <1>
termQuery("tag","something"), // <2>
ScoreMode.None); // <3>
@@ -227,7 +226,7 @@ public class QueryDSLDocumentationTests extends ESTestCase {
public void testHasParent() {
// tag::has_parent
hasParentQuery(
JoinQueryBuilders.hasParentQuery(
"blog", // <1>
termQuery("tag","something"), // <2>
false); // <3>
@@ -339,7 +338,7 @@ public class QueryDSLDocumentationTests extends ESTestCase {
parameters.put("param1", 5);
scriptQuery(new Script(
ScriptType.STORED, // <1>
"painless", // <2>
null, // <2>
"myscript", // <3>
singletonMap("param1", 5))); // <4>
// end::script_file


@@ -0,0 +1,487 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.documentation;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.Aggregation;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.aggregations.bucket.range.Range;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.avg.Avg;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.sort.ScoreSortBuilder;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.search.suggest.Suggest;
import org.elasticsearch.search.suggest.SuggestBuilder;
import org.elasticsearch.search.suggest.SuggestBuilders;
import org.elasticsearch.search.suggest.SuggestionBuilder;
import org.elasticsearch.search.suggest.term.TermSuggestion;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.hamcrest.Matchers.greaterThan;
/**
* This class is used to generate the Java High Level REST Client Search API documentation.
* <p>
* You need to wrap your code between two tags like:
* // tag::example[]
* // end::example[]
* <p>
* Where example is your tag name.
* <p>
* Then in the documentation, you can extract what is between tag and end tags with
* ["source","java",subs="attributes,callouts,macros"]
* --------------------------------------------------
* include-tagged::{doc-tests}/SearchDocumentationIT.java[example]
* --------------------------------------------------
*/
public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
@SuppressWarnings({ "unused", "unchecked" })
public void testSearch() throws IOException {
RestHighLevelClient client = highLevelClient();
{
BulkRequest request = new BulkRequest();
request.add(new IndexRequest("posts", "doc", "1")
.source(XContentType.JSON, "title", "In which order are my Elasticsearch queries executed?", "user",
Arrays.asList("kimchy", "luca"), "innerObject", Collections.singletonMap("key", "value")));
request.add(new IndexRequest("posts", "doc", "2")
.source(XContentType.JSON, "title", "Current status and upcoming changes in Elasticsearch", "user",
Arrays.asList("kimchy", "christoph"), "innerObject", Collections.singletonMap("key", "value")));
request.add(new IndexRequest("posts", "doc", "3")
.source(XContentType.JSON, "title", "The Future of Federated Search in Elasticsearch", "user",
Arrays.asList("kimchy", "tanguy"), "innerObject", Collections.singletonMap("key", "value")));
request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
BulkResponse bulkResponse = client.bulk(request);
assertSame(bulkResponse.status(), RestStatus.OK);
assertFalse(bulkResponse.hasFailures());
}
{
// tag::search-request-basic
SearchRequest searchRequest = new SearchRequest(); // <1>
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); // <2>
searchSourceBuilder.query(QueryBuilders.matchAllQuery()); // <3>
// end::search-request-basic
}
{
// tag::search-request-indices-types
SearchRequest searchRequest = new SearchRequest("posts"); // <1>
searchRequest.types("doc"); // <2>
// end::search-request-indices-types
// tag::search-request-routing
searchRequest.routing("routing"); // <1>
// end::search-request-routing
// tag::search-request-indicesOptions
searchRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1>
// end::search-request-indicesOptions
// tag::search-request-preference
searchRequest.preference("_local"); // <1>
// end::search-request-preference
assertNotNull(client.search(searchRequest));
}
{
// tag::search-source-basics
SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); // <1>
sourceBuilder.query(QueryBuilders.termQuery("user", "kimchy")); // <2>
sourceBuilder.from(0); // <3>
sourceBuilder.size(5); // <4>
sourceBuilder.sort(new ScoreSortBuilder().order(SortOrder.ASC));
sourceBuilder.timeout(new TimeValue(60, TimeUnit.SECONDS)); // <5>
// end::search-source-basics
// tag::search-source-setter
SearchRequest searchRequest = new SearchRequest();
searchRequest.source(sourceBuilder);
// end::search-source-setter
// tag::search-execute
SearchResponse searchResponse = client.search(searchRequest);
// end::search-execute
// tag::search-execute-async
client.searchAsync(searchRequest, new ActionListener<SearchResponse>() {
@Override
public void onResponse(SearchResponse searchResponse) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
});
// end::search-execute-async
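// A minimal sketch (not one of the documented snippets): tests that use the asynchronous
// variant typically wait for the listener to fire before asserting anything. This assumes a
// plain java.util.concurrent.CountDownLatch and reuses the searchRequest built above.
final java.util.concurrent.CountDownLatch latch = new java.util.concurrent.CountDownLatch(1);
client.searchAsync(searchRequest, new ActionListener<SearchResponse>() {
@Override
public void onResponse(SearchResponse response) {
latch.countDown(); // release the waiting thread on success
}
@Override
public void onFailure(Exception e) {
latch.countDown(); // release the waiting thread on failure as well
}
});
try {
latch.await(); // block until the listener has fired
} catch (InterruptedException e) {
Thread.currentThread().interrupt(); // restore the interrupt flag and fall through
}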
// tag::search-response-1
RestStatus status = searchResponse.status();
TimeValue took = searchResponse.getTook();
Boolean terminatedEarly = searchResponse.isTerminatedEarly();
boolean timedOut = searchResponse.isTimedOut();
// end::search-response-1
// tag::search-response-2
int totalShards = searchResponse.getTotalShards();
int successfulShards = searchResponse.getSuccessfulShards();
int failedShards = searchResponse.getFailedShards();
for (ShardSearchFailure failure : searchResponse.getShardFailures()) {
// failures should be handled here
}
// end::search-response-2
assertNotNull(searchResponse);
// tag::search-hits-get
SearchHits hits = searchResponse.getHits();
// end::search-hits-get
// tag::search-hits-info
long totalHits = hits.getTotalHits();
float maxScore = hits.getMaxScore();
// end::search-hits-info
// tag::search-hits-singleHit
SearchHit[] searchHits = hits.getHits();
for (SearchHit hit : searchHits) {
// do something with the SearchHit
}
// end::search-hits-singleHit
for (SearchHit hit : searchHits) {
// tag::search-hits-singleHit-properties
String index = hit.getIndex();
String type = hit.getType();
String id = hit.getId();
float score = hit.getScore();
// end::search-hits-singleHit-properties
// tag::search-hits-singleHit-source
String sourceAsString = hit.getSourceAsString();
Map<String, Object> sourceAsMap = hit.getSourceAsMap();
String documentTitle = (String) sourceAsMap.get("title");
List<Object> users = (List<Object>) sourceAsMap.get("user");
Map<String, Object> innerObject = (Map<String, Object>) sourceAsMap.get("innerObject");
// end::search-hits-singleHit-source
}
assertEquals(3, totalHits);
assertNotNull(hits.getHits()[0].getSourceAsString());
assertNotNull(hits.getHits()[0].getSourceAsMap().get("title"));
assertNotNull(hits.getHits()[0].getSourceAsMap().get("user"));
assertNotNull(hits.getHits()[0].getSourceAsMap().get("innerObject"));
}
}
@SuppressWarnings({ "unused" })
public void testSearchRequestAggregations() throws IOException {
RestHighLevelClient client = highLevelClient();
{
BulkRequest request = new BulkRequest();
request.add(new IndexRequest("posts", "doc", "1")
.source(XContentType.JSON, "company", "Elastic", "age", 20));
request.add(new IndexRequest("posts", "doc", "2")
.source(XContentType.JSON, "company", "Elastic", "age", 30));
request.add(new IndexRequest("posts", "doc", "3")
.source(XContentType.JSON, "company", "Elastic", "age", 40));
request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
BulkResponse bulkResponse = client.bulk(request);
assertSame(RestStatus.OK, bulkResponse.status());
assertFalse(bulkResponse.hasFailures());
}
{
SearchRequest searchRequest = new SearchRequest();
// tag::search-request-aggregations
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
TermsAggregationBuilder aggregation = AggregationBuilders.terms("by_company")
.field("company.keyword");
aggregation.subAggregation(AggregationBuilders.avg("average_age")
.field("age"));
searchSourceBuilder.aggregation(aggregation);
// end::search-request-aggregations
searchSourceBuilder.query(QueryBuilders.matchAllQuery());
searchRequest.source(searchSourceBuilder);
SearchResponse searchResponse = client.search(searchRequest);
{
// tag::search-request-aggregations-get
Aggregations aggregations = searchResponse.getAggregations();
Terms byCompanyAggregation = aggregations.get("by_company"); // <1>
Bucket elasticBucket = byCompanyAggregation.getBucketByKey("Elastic"); // <2>
Avg averageAge = elasticBucket.getAggregations().get("average_age"); // <3>
double avg = averageAge.getValue();
// end::search-request-aggregations-get
try {
// tag::search-request-aggregations-get-wrongCast
Range range = aggregations.get("by_company"); // <1>
// end::search-request-aggregations-get-wrongCast
} catch (ClassCastException ex) {
assertEquals("org.elasticsearch.search.aggregations.bucket.terms.ParsedStringTerms"
+ " cannot be cast to org.elasticsearch.search.aggregations.bucket.range.Range", ex.getMessage());
}
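// A defensive sketch (not one of the documented snippets): an instanceof check avoids the
// ClassCastException demonstrated above when the aggregation type is not known up front.
Aggregation maybeTerms = aggregations.get("by_company");
if (maybeTerms instanceof Terms) {
Terms termsAgg = (Terms) maybeTerms;
long docCount = termsAgg.getBucketByKey("Elastic").getDocCount(); // safe to use as Terms here
}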
assertEquals(3, elasticBucket.getDocCount());
assertEquals(30, avg, 0.0);
}
Aggregations aggregations = searchResponse.getAggregations();
{
// tag::search-request-aggregations-asMap
Map<String, Aggregation> aggregationMap = aggregations.getAsMap();
Terms companyAggregation = (Terms) aggregationMap.get("by_company");
// end::search-request-aggregations-asMap
}
{
// tag::search-request-aggregations-asList
List<Aggregation> aggregationList = aggregations.asList();
// end::search-request-aggregations-asList
}
{
// tag::search-request-aggregations-iterator
for (Aggregation agg : aggregations) {
String type = agg.getType();
if (type.equals(TermsAggregationBuilder.NAME)) {
Bucket elasticBucket = ((Terms) agg).getBucketByKey("Elastic");
long numberOfDocs = elasticBucket.getDocCount();
}
}
// end::search-request-aggregations-iterator
}
}
}
@SuppressWarnings({ "unused", "rawtypes" })
public void testSearchRequestSuggestions() throws IOException {
RestHighLevelClient client = highLevelClient();
{
BulkRequest request = new BulkRequest();
request.add(new IndexRequest("posts", "doc", "1").source(XContentType.JSON, "user", "kimchy"));
request.add(new IndexRequest("posts", "doc", "2").source(XContentType.JSON, "user", "javanna"));
request.add(new IndexRequest("posts", "doc", "3").source(XContentType.JSON, "user", "tlrx"));
request.add(new IndexRequest("posts", "doc", "4").source(XContentType.JSON, "user", "cbuescher"));
request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
BulkResponse bulkResponse = client.bulk(request);
assertSame(RestStatus.OK, bulkResponse.status());
assertFalse(bulkResponse.hasFailures());
}
{
SearchRequest searchRequest = new SearchRequest();
// tag::search-request-suggestion
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
SuggestionBuilder termSuggestionBuilder =
SuggestBuilders.termSuggestion("user").text("kmichy"); // <1>
SuggestBuilder suggestBuilder = new SuggestBuilder();
suggestBuilder.addSuggestion("suggest_user", termSuggestionBuilder); // <2>
searchSourceBuilder.suggest(suggestBuilder);
// end::search-request-suggestion
searchRequest.source(searchSourceBuilder);
SearchResponse searchResponse = client.search(searchRequest);
{
// tag::search-request-suggestion-get
Suggest suggest = searchResponse.getSuggest(); // <1>
TermSuggestion termSuggestion = suggest.getSuggestion("suggest_user"); // <2>
for (TermSuggestion.Entry entry : termSuggestion.getEntries()) { // <3>
for (TermSuggestion.Entry.Option option : entry) { // <4>
String suggestText = option.getText().string();
}
}
// end::search-request-suggestion-get
assertEquals(1, termSuggestion.getEntries().size());
assertEquals(1, termSuggestion.getEntries().get(0).getOptions().size());
assertEquals("kimchy", termSuggestion.getEntries().get(0).getOptions().get(0).getText().string());
}
}
}
public void testScroll() throws IOException {
RestHighLevelClient client = highLevelClient();
{
BulkRequest request = new BulkRequest();
request.add(new IndexRequest("posts", "doc", "1")
.source(XContentType.JSON, "title", "In which order are my Elasticsearch queries executed?"));
request.add(new IndexRequest("posts", "doc", "2")
.source(XContentType.JSON, "title", "Current status and upcoming changes in Elasticsearch"));
request.add(new IndexRequest("posts", "doc", "3")
.source(XContentType.JSON, "title", "The Future of Federated Search in Elasticsearch"));
request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
BulkResponse bulkResponse = client.bulk(request);
assertSame(RestStatus.OK, bulkResponse.status());
assertFalse(bulkResponse.hasFailures());
}
{
int size = 1;
// tag::search-scroll-init
SearchRequest searchRequest = new SearchRequest("posts");
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
searchSourceBuilder.query(matchQuery("title", "Elasticsearch"));
searchSourceBuilder.size(size); // <1>
searchRequest.source(searchSourceBuilder);
searchRequest.scroll(TimeValue.timeValueMinutes(1L)); // <2>
SearchResponse searchResponse = client.search(searchRequest);
String scrollId = searchResponse.getScrollId(); // <3>
SearchHits hits = searchResponse.getHits(); // <4>
// end::search-scroll-init
assertEquals(3, hits.getTotalHits());
assertEquals(1, hits.getHits().length);
assertNotNull(scrollId);
// tag::search-scroll2
SearchScrollRequest scrollRequest = new SearchScrollRequest(scrollId); // <1>
scrollRequest.scroll(TimeValue.timeValueSeconds(30));
SearchResponse searchScrollResponse = client.searchScroll(scrollRequest);
scrollId = searchScrollResponse.getScrollId(); // <2>
hits = searchScrollResponse.getHits(); // <3>
assertEquals(3, hits.getTotalHits());
assertEquals(1, hits.getHits().length);
assertNotNull(scrollId);
// end::search-scroll2
ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
clearScrollRequest.addScrollId(scrollId);
ClearScrollResponse clearScrollResponse = client.clearScroll(clearScrollRequest);
assertTrue(clearScrollResponse.isSucceeded());
}
{
SearchRequest searchRequest = new SearchRequest();
searchRequest.scroll("60s");
SearchResponse initialSearchResponse = client.search(searchRequest);
String scrollId = initialSearchResponse.getScrollId();
SearchScrollRequest scrollRequest = new SearchScrollRequest();
scrollRequest.scrollId(scrollId);
// tag::scroll-request-arguments
scrollRequest.scroll(TimeValue.timeValueSeconds(60L)); // <1>
scrollRequest.scroll("60s"); // <2>
// end::scroll-request-arguments
// tag::search-scroll-execute-sync
SearchResponse searchResponse = client.searchScroll(scrollRequest);
// end::search-scroll-execute-sync
assertEquals(0, searchResponse.getFailedShards());
assertEquals(3L, searchResponse.getHits().getTotalHits());
// tag::search-scroll-execute-async
client.searchScrollAsync(scrollRequest, new ActionListener<SearchResponse>() {
@Override
public void onResponse(SearchResponse searchResponse) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
});
// end::search-scroll-execute-async
// tag::clear-scroll-request
ClearScrollRequest request = new ClearScrollRequest(); // <1>
request.addScrollId(scrollId); // <2>
// end::clear-scroll-request
// tag::clear-scroll-add-scroll-id
request.addScrollId(scrollId);
// end::clear-scroll-add-scroll-id
List<String> scrollIds = Collections.singletonList(scrollId);
// tag::clear-scroll-add-scroll-ids
request.setScrollIds(scrollIds);
// end::clear-scroll-add-scroll-ids
// tag::clear-scroll-execute
ClearScrollResponse response = client.clearScroll(request);
// end::clear-scroll-execute
// tag::clear-scroll-response
boolean success = response.isSucceeded(); // <1>
int released = response.getNumFreed(); // <2>
// end::clear-scroll-response
assertTrue(success);
assertThat(released, greaterThan(0));
// tag::clear-scroll-execute-async
client.clearScrollAsync(request, new ActionListener<ClearScrollResponse>() {
@Override
public void onResponse(ClearScrollResponse clearScrollResponse) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
});
// end::clear-scroll-execute-async
}
{
// tag::search-scroll-example
final Scroll scroll = new Scroll(TimeValue.timeValueMinutes(1L));
SearchRequest searchRequest = new SearchRequest("posts");
searchRequest.scroll(scroll);
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
searchSourceBuilder.query(matchQuery("title", "Elasticsearch"));
searchRequest.source(searchSourceBuilder);
SearchResponse searchResponse = client.search(searchRequest); // <1>
String scrollId = searchResponse.getScrollId();
SearchHit[] searchHits = searchResponse.getHits().getHits();
while (searchHits != null && searchHits.length > 0) { // <2>
SearchScrollRequest scrollRequest = new SearchScrollRequest(scrollId); // <3>
scrollRequest.scroll(scroll);
searchResponse = client.searchScroll(scrollRequest);
scrollId = searchResponse.getScrollId();
searchHits = searchResponse.getHits().getHits();
// <4>
}
ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); // <5>
clearScrollRequest.addScrollId(scrollId);
ClearScrollResponse clearScrollResponse = client.clearScroll(clearScrollRequest);
boolean succeeded = clearScrollResponse.isSucceeded();
// end::search-scroll-example
assertTrue(succeeded);
}
}
}

View File

@@ -18,7 +18,6 @@
*/
import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.gradle.api.JavaVersion
apply plugin: 'elasticsearch.build'
apply plugin: 'ru.vyarus.animalsniffer'
@@ -29,6 +28,15 @@ targetCompatibility = JavaVersion.VERSION_1_7
sourceCompatibility = JavaVersion.VERSION_1_7
group = 'org.elasticsearch.client'
archivesBaseName = 'elasticsearch-rest-client'
publishing {
publications {
nebula {
artifactId = archivesBaseName
}
}
}
dependencies {
compile "org.apache.httpcomponents:httpclient:${versions.httpclient}"
@@ -63,11 +71,6 @@ forbiddenApisTest {
PrecommitTasks.getResource('/forbidden/http-signatures.txt')]
}
dependencyLicenses {
mapping from: /http.*/, to: 'httpclient'
mapping from: /commons-.*/, to: 'commons'
}
//JarHell is part of es core, which we don't want to pull in
jarHell.enabled=false

View File

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,6 @@
Apache Commons Logging
Copyright 2003-2013 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).

View File

@@ -0,0 +1,182 @@
Apache License, Version 2.0, January 2004, http://www.apache.org/licenses/ (full license text identical to the Apache License reproduced above)
This project contains annotations derived from JCIP-ANNOTATIONS
Copyright (c) 2005 Brian Goetz and Tim Peierls.
See http://www.jcip.net and the Creative Commons Attribution License
(http://creativecommons.org/licenses/by/2.5)

View File

@@ -0,0 +1,5 @@
Apache HttpComponents AsyncClient
Copyright 2010-2016 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).

View File

@@ -0,0 +1,178 @@
Apache License, Version 2.0, January 2004, http://www.apache.org/licenses/ (full license text identical to the Apache License reproduced above)

View File

@@ -0,0 +1,5 @@
Apache HttpComponents Core
Copyright 2005-2016 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).

View File

@@ -0,0 +1,202 @@
Apache License, Version 2.0, January 2004, http://www.apache.org/licenses/ (full license text, including the appendix, identical to the Apache License reproduced above)

View File

@@ -0,0 +1,8 @@
Apache HttpCore NIO
Copyright 2005-2016 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).

View File

@@ -0,0 +1,337 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.documentation;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.RequestLine;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.entity.ContentType;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.apache.http.impl.nio.reactor.IOReactorConfig;
import org.apache.http.message.BasicHeader;
import org.apache.http.nio.entity.NStringEntity;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.HttpAsyncResponseConsumerFactory;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseListener;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import javax.net.ssl.SSLContext;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.KeyStore;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.security.cert.CertificateException;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
/**
* This class is used to generate the Java low-level REST client documentation.
* You need to wrap your code between two tags like:
* // tag::example[]
* // end::example[]
*
* Where example is your tag name.
*
* Then in the documentation, you can extract what is between tag and end tags with
* ["source","java",subs="attributes,callouts,macros"]
* --------------------------------------------------
* include-tagged::{doc-tests}/RestClientDocumentation.java[example]
* --------------------------------------------------
*
* Note that this is not a test class: we are only interested in making sure that the docs snippets compile.
* We don't want to send requests to a node, and we don't even have the tools to do it.
*/
@SuppressWarnings("unused")
public class RestClientDocumentation {
@SuppressWarnings("unused")
public void testUsage() throws IOException, InterruptedException {
//tag::rest-client-init
RestClient restClient = RestClient.builder(
new HttpHost("localhost", 9200, "http"),
new HttpHost("localhost", 9201, "http")).build();
//end::rest-client-init
//tag::rest-client-close
restClient.close();
//end::rest-client-close
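// A minimal sketch (not one of the documented snippets): RestClient implements Closeable, so a
// short-lived client can also be managed with try-with-resources.
try (RestClient shortLivedClient = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
shortLivedClient.performRequest("GET", "/");
}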
{
//tag::rest-client-init-default-headers
RestClientBuilder builder = RestClient.builder(new HttpHost("localhost", 9200, "http"));
Header[] defaultHeaders = new Header[]{new BasicHeader("header", "value")};
builder.setDefaultHeaders(defaultHeaders); // <1>
//end::rest-client-init-default-headers
}
{
//tag::rest-client-init-max-retry-timeout
RestClientBuilder builder = RestClient.builder(new HttpHost("localhost", 9200, "http"));
builder.setMaxRetryTimeoutMillis(10000); // <1>
//end::rest-client-init-max-retry-timeout
}
{
//tag::rest-client-init-failure-listener
RestClientBuilder builder = RestClient.builder(new HttpHost("localhost", 9200, "http"));
builder.setFailureListener(new RestClient.FailureListener() {
@Override
public void onFailure(HttpHost host) {
// <1>
}
});
//end::rest-client-init-failure-listener
}
{
//tag::rest-client-init-request-config-callback
RestClientBuilder builder = RestClient.builder(new HttpHost("localhost", 9200, "http"));
builder.setRequestConfigCallback(new RestClientBuilder.RequestConfigCallback() {
@Override
public RequestConfig.Builder customizeRequestConfig(RequestConfig.Builder requestConfigBuilder) {
return requestConfigBuilder.setSocketTimeout(10000); // <1>
}
});
//end::rest-client-init-request-config-callback
}
{
//tag::rest-client-init-client-config-callback
RestClientBuilder builder = RestClient.builder(new HttpHost("localhost", 9200, "http"));
builder.setHttpClientConfigCallback(new RestClientBuilder.HttpClientConfigCallback() {
@Override
public HttpAsyncClientBuilder customizeHttpClient(HttpAsyncClientBuilder httpClientBuilder) {
return httpClientBuilder.setProxy(new HttpHost("proxy", 9000, "http")); // <1>
}
});
//end::rest-client-init-client-config-callback
}
{
//tag::rest-client-verb-endpoint
Response response = restClient.performRequest("GET", "/"); // <1>
//end::rest-client-verb-endpoint
}
{
//tag::rest-client-headers
Response response = restClient.performRequest("GET", "/", new BasicHeader("header", "value"));
//end::rest-client-headers
}
{
//tag::rest-client-verb-endpoint-params
Map<String, String> params = Collections.singletonMap("pretty", "true");
Response response = restClient.performRequest("GET", "/", params); // <1>
//end::rest-client-verb-endpoint-params
}
{
//tag::rest-client-verb-endpoint-params-body
Map<String, String> params = Collections.emptyMap();
String jsonString = "{" +
"\"user\":\"kimchy\"," +
"\"postDate\":\"2013-01-30\"," +
"\"message\":\"trying out Elasticsearch\"" +
"}";
HttpEntity entity = new NStringEntity(jsonString, ContentType.APPLICATION_JSON);
Response response = restClient.performRequest("PUT", "/posts/doc/1", params, entity); // <1>
//end::rest-client-verb-endpoint-params-body
}
{
//tag::rest-client-response-consumer
Map<String, String> params = Collections.emptyMap();
HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory consumerFactory =
new HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory(30 * 1024 * 1024);
Response response = restClient.performRequest("GET", "/posts/_search", params, null, consumerFactory); // <1>
//end::rest-client-response-consumer
}
{
//tag::rest-client-verb-endpoint-async
ResponseListener responseListener = new ResponseListener() {
@Override
public void onSuccess(Response response) {
// <1>
}
@Override
public void onFailure(Exception exception) {
// <2>
}
};
restClient.performRequestAsync("GET", "/", responseListener); // <3>
//end::rest-client-verb-endpoint-async
//tag::rest-client-headers-async
Header[] headers = {
new BasicHeader("header1", "value1"),
new BasicHeader("header2", "value2")
};
restClient.performRequestAsync("GET", "/", responseListener, headers);
//end::rest-client-headers-async
//tag::rest-client-verb-endpoint-params-async
Map<String, String> params = Collections.singletonMap("pretty", "true");
restClient.performRequestAsync("GET", "/", params, responseListener); // <1>
//end::rest-client-verb-endpoint-params-async
//tag::rest-client-verb-endpoint-params-body-async
String jsonString = "{" +
"\"user\":\"kimchy\"," +
"\"postDate\":\"2013-01-30\"," +
"\"message\":\"trying out Elasticsearch\"" +
"}";
HttpEntity entity = new NStringEntity(jsonString, ContentType.APPLICATION_JSON);
restClient.performRequestAsync("PUT", "/posts/doc/1", params, entity, responseListener); // <1>
//end::rest-client-verb-endpoint-params-body-async
//tag::rest-client-response-consumer-async
HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory consumerFactory =
new HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory(30 * 1024 * 1024);
restClient.performRequestAsync("GET", "/posts/_search", params, null, consumerFactory, responseListener); // <1>
//end::rest-client-response-consumer-async
}
{
//tag::rest-client-response2
Response response = restClient.performRequest("GET", "/");
RequestLine requestLine = response.getRequestLine(); // <1>
HttpHost host = response.getHost(); // <2>
int statusCode = response.getStatusLine().getStatusCode(); // <3>
Header[] headers = response.getHeaders(); // <4>
String responseBody = EntityUtils.toString(response.getEntity()); // <5>
//end::rest-client-response2
}
{
HttpEntity[] documents = new HttpEntity[10];
//tag::rest-client-async-example
final CountDownLatch latch = new CountDownLatch(documents.length);
for (int i = 0; i < documents.length; i++) {
restClient.performRequestAsync(
"PUT",
"/posts/doc/" + i,
Collections.<String, String>emptyMap(),
//let's assume that the documents are stored in an HttpEntity array
documents[i],
new ResponseListener() {
@Override
public void onSuccess(Response response) {
// <1>
latch.countDown();
}
@Override
public void onFailure(Exception exception) {
// <2>
latch.countDown();
}
}
);
}
latch.await();
//end::rest-client-async-example
}
}
@SuppressWarnings("unused")
public void testCommonConfiguration() throws IOException, KeyStoreException, CertificateException, NoSuchAlgorithmException {
{
//tag::rest-client-config-timeouts
RestClientBuilder builder = RestClient.builder(new HttpHost("localhost", 9200))
.setRequestConfigCallback(new RestClientBuilder.RequestConfigCallback() {
@Override
public RequestConfig.Builder customizeRequestConfig(RequestConfig.Builder requestConfigBuilder) {
return requestConfigBuilder.setConnectTimeout(5000)
.setSocketTimeout(60000);
}
})
.setMaxRetryTimeoutMillis(60000);
//end::rest-client-config-timeouts
}
{
//tag::rest-client-config-threads
RestClientBuilder builder = RestClient.builder(new HttpHost("localhost", 9200))
.setHttpClientConfigCallback(new RestClientBuilder.HttpClientConfigCallback() {
@Override
public HttpAsyncClientBuilder customizeHttpClient(HttpAsyncClientBuilder httpClientBuilder) {
return httpClientBuilder.setDefaultIOReactorConfig(
IOReactorConfig.custom().setIoThreadCount(1).build());
}
});
//end::rest-client-config-threads
}
{
//tag::rest-client-config-basic-auth
final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
credentialsProvider.setCredentials(AuthScope.ANY,
new UsernamePasswordCredentials("user", "password"));
RestClientBuilder builder = RestClient.builder(new HttpHost("localhost", 9200))
.setHttpClientConfigCallback(new RestClientBuilder.HttpClientConfigCallback() {
@Override
public HttpAsyncClientBuilder customizeHttpClient(HttpAsyncClientBuilder httpClientBuilder) {
return httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider);
}
});
//end::rest-client-config-basic-auth
}
{
//tag::rest-client-config-disable-preemptive-auth
final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
credentialsProvider.setCredentials(AuthScope.ANY,
new UsernamePasswordCredentials("user", "password"));
RestClientBuilder builder = RestClient.builder(new HttpHost("localhost", 9200))
.setHttpClientConfigCallback(new RestClientBuilder.HttpClientConfigCallback() {
@Override
public HttpAsyncClientBuilder customizeHttpClient(HttpAsyncClientBuilder httpClientBuilder) {
httpClientBuilder.disableAuthCaching(); // <1>
return httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider);
}
});
//end::rest-client-config-disable-preemptive-auth
}
{
Path keyStorePath = Paths.get("");
String keyStorePass = "";
final SSLContext sslContext = null;
//tag::rest-client-config-encrypted-communication
KeyStore keystore = KeyStore.getInstance("jks");
try (InputStream is = Files.newInputStream(keyStorePath)) {
keystore.load(is, keyStorePass.toCharArray());
}
RestClientBuilder builder = RestClient.builder(new HttpHost("localhost", 9200))
.setHttpClientConfigCallback(new RestClientBuilder.HttpClientConfigCallback() {
@Override
public HttpAsyncClientBuilder customizeHttpClient(HttpAsyncClientBuilder httpClientBuilder) {
return httpClientBuilder.setSSLContext(sslContext);
}
});
//end::rest-client-config-encrypted-communication
}
}
}

View File

@ -18,7 +18,6 @@
*/
import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.gradle.api.JavaVersion
apply plugin: 'elasticsearch.build'
apply plugin: 'ru.vyarus.animalsniffer'
@ -29,9 +28,18 @@ targetCompatibility = JavaVersion.VERSION_1_7
sourceCompatibility = JavaVersion.VERSION_1_7
group = 'org.elasticsearch.client'
archivesBaseName = 'elasticsearch-rest-client-sniffer'
publishing {
publications {
nebula {
artifactId = archivesBaseName
}
}
}
dependencies {
compile "org.elasticsearch.client:rest:${version}"
compile "org.elasticsearch.client:elasticsearch-rest-client:${version}"
compile "org.apache.httpcomponents:httpclient:${versions.httpclient}"
compile "org.apache.httpcomponents:httpcore:${versions.httpcore}"
compile "commons-codec:commons-codec:${versions.commonscodec}"

View File

@ -0,0 +1,131 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.sniff.documentation;
import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.sniff.ElasticsearchHostsSniffer;
import org.elasticsearch.client.sniff.HostsSniffer;
import org.elasticsearch.client.sniff.SniffOnFailureListener;
import org.elasticsearch.client.sniff.Sniffer;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.TimeUnit;
/**
* This class is used to generate the Java low-level REST client sniffer documentation.
* You need to wrap your code between two tags like:
* // tag::example[]
* // end::example[]
*
* Where example is your tag name.
*
* Then in the documentation, you can extract what is between tag and end tags with
* ["source","java",subs="attributes,callouts,macros"]
* --------------------------------------------------
* include-tagged::{doc-tests}/SnifferDocumentation.java[example]
* --------------------------------------------------
*
* Note that this is not a test class: we are only interested in checking that the docs snippets compile. We don't
* want to send requests to a node, and we don't even have the tools to do it.
*/
@SuppressWarnings("unused")
public class SnifferDocumentation {
@SuppressWarnings("unused")
public void testUsage() throws IOException {
{
//tag::sniffer-init
RestClient restClient = RestClient.builder(
new HttpHost("localhost", 9200, "http"))
.build();
Sniffer sniffer = Sniffer.builder(restClient).build();
//end::sniffer-init
//tag::sniffer-close
sniffer.close();
restClient.close();
//end::sniffer-close
}
{
//tag::sniffer-interval
RestClient restClient = RestClient.builder(
new HttpHost("localhost", 9200, "http"))
.build();
Sniffer sniffer = Sniffer.builder(restClient)
.setSniffIntervalMillis(60000).build();
//end::sniffer-interval
}
{
//tag::sniff-on-failure
SniffOnFailureListener sniffOnFailureListener = new SniffOnFailureListener();
RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200))
.setFailureListener(sniffOnFailureListener) // <1>
.build();
Sniffer sniffer = Sniffer.builder(restClient)
.setSniffAfterFailureDelayMillis(30000) // <2>
.build();
sniffOnFailureListener.setSniffer(sniffer); // <3>
//end::sniff-on-failure
}
{
//tag::sniffer-https
RestClient restClient = RestClient.builder(
new HttpHost("localhost", 9200, "http"))
.build();
HostsSniffer hostsSniffer = new ElasticsearchHostsSniffer(
restClient,
ElasticsearchHostsSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT,
ElasticsearchHostsSniffer.Scheme.HTTPS);
Sniffer sniffer = Sniffer.builder(restClient)
.setHostsSniffer(hostsSniffer).build();
//end::sniffer-https
}
{
//tag::sniff-request-timeout
RestClient restClient = RestClient.builder(
new HttpHost("localhost", 9200, "http"))
.build();
HostsSniffer hostsSniffer = new ElasticsearchHostsSniffer(
restClient,
TimeUnit.SECONDS.toMillis(5),
ElasticsearchHostsSniffer.Scheme.HTTP);
Sniffer sniffer = Sniffer.builder(restClient)
.setHostsSniffer(hostsSniffer).build();
//end::sniff-request-timeout
}
{
//tag::custom-hosts-sniffer
RestClient restClient = RestClient.builder(
new HttpHost("localhost", 9200, "http"))
.build();
HostsSniffer hostsSniffer = new HostsSniffer() {
@Override
public List<HttpHost> sniffHosts() throws IOException {
return null; // <1>
}
};
Sniffer sniffer = Sniffer.builder(restClient)
.setHostsSniffer(hostsSniffer).build();
//end::custom-hosts-sniffer
}
}
}
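The interval and on-failure settings shown above are independent and can be combined. Below is a minimal sketch of
doing so, reusing the imports from the file above; the timing values are illustrative only:

SniffOnFailureListener listener = new SniffOnFailureListener();
RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200, "http"))
        .setFailureListener(listener)           // wire the listener before building the client
        .build();
Sniffer sniffer = Sniffer.builder(restClient)
        .setSniffIntervalMillis(60000)          // regular sniffing every minute
        .setSniffAfterFailureDelayMillis(30000) // sniff sooner right after a failure
        .build();
listener.setSniffer(sniffer);                   // the listener needs the sniffer instance to trigger it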

View File

@ -0,0 +1 @@
c9dfcec6f07ee4b1d7a6c09a7eaa9dd4fb6d2c79

View File

@ -1 +0,0 @@
6edc9b4514969d768039acf43f04210b15658cd7

View File

@ -0,0 +1 @@
f77cf26c5b00236aee5f2153ec73ad93a1de2c8c

View File

@ -1 +0,0 @@
00d3260223eac0405a82eeeb8439de0e5eb5f888

View File

@ -0,0 +1 @@
818396925ddb710f1f922265242c1bff5c7bc45f

View File

@ -1 +0,0 @@
3a698989219afd9150738899bc849075c102881b

View File

@ -0,0 +1 @@
decbf76ec732066c26158fff8763b77bb55fffcc

View File

@ -1 +0,0 @@
bb636d31949418943454dbe2d72b9b66cd743f9f

View File

@ -0,0 +1 @@
b14da3f80fb98ee7278f47d65db541b9db91fc9f

View File

@ -1 +0,0 @@
720252d786273edcc48b2ae7b380bc229fe8930c

View File

@ -0,0 +1 @@
bf009e463e0bca00f968286ca978031e1bbdf69d

View File

@ -1 +0,0 @@
735178c26f3eb361c30657beeec9e57bd5548d58

View File

@ -0,0 +1 @@
828f3d137269a5dc4bdd42de09520ea65590b5a2

View File

@ -1 +0,0 @@
de5e5cd9b00be4d005d0e51c74084be6c07b0bbd

View File

@ -0,0 +1 @@
384cb98eb67b525b3b3e534c69cf53e0d8579499

View File

@ -1 +0,0 @@
796ca5e5a9af3cc21f50156fa7e614338ec15ceb

View File

@ -0,0 +1 @@
329975dde472be9e76f4f13d83869fddeef79354

View File

@ -1 +0,0 @@
7ba802083c4c97a07d9487c2b26ee39e4f8e3c7e

View File

@ -0,0 +1 @@
ab36aba4f7e725194db07c72388a1e064badea7b

View File

@ -1 +0,0 @@
d66adfdb3f330b726420db5f8db21b17a0d9991d

View File

@ -0,0 +1 @@
fecf827029d47ac96aa7ad31b3bdc0ef5d624fed

View File

@ -1 +0,0 @@
569c6362cb87858fc282fd786ba0fda0c44f0a8b

View File

@ -0,0 +1 @@
0940f5c7740b9e95d417ec7ab230196247aca1ac

View File

@ -1 +0,0 @@
0ba62e91082910b1057027b8912395da670105d0

View File

@ -0,0 +1 @@
83a509fb49c3933bf58a977e7b1b7e8f980af220

View File

@ -1 +0,0 @@
968e678dc4a236bbc8e4c2eb66f5702ea48aae10

View File

@ -0,0 +1 @@
87d04768126970eefb3803e41bbfb8951f7e25e4

View File

@ -1 +0,0 @@
579670cc27104fdbd627959b7982a99eab1d16d1

View File

@ -0,0 +1 @@
75614090f2a1422b1c385e049fffd2652bde4d3f

View File

@ -1 +0,0 @@
53f3fc06ed3357dc75d7b050172520aa86d41010

View File

@ -0,0 +1 @@
6a259acc9ba6d83f8091d282ef0b0ddff295c68f

View File

@ -1 +0,0 @@
5281aa095f4f46580ea2008ffd040733096d0246

View File

@ -0,0 +1,164 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.lucene.queries;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;
import org.apache.lucene.store.ByteArrayDataInput;
import org.apache.lucene.util.BytesRef;
import java.io.IOException;
import java.util.Objects;
public final class BinaryDocValuesRangeQuery extends Query {
private final String fieldName;
private final QueryType queryType;
private final BytesRef from;
private final BytesRef to;
private final Object originalFrom;
private final Object originalTo;
public BinaryDocValuesRangeQuery(String fieldName, QueryType queryType, BytesRef from, BytesRef to,
Object originalFrom, Object originalTo) {
this.fieldName = fieldName;
this.queryType = queryType;
this.from = from;
this.to = to;
this.originalFrom = originalFrom;
this.originalTo = originalTo;
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
return new ConstantScoreWeight(this, boost) {
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
final BinaryDocValues values = context.reader().getBinaryDocValues(fieldName);
if (values == null) {
return null;
}
final TwoPhaseIterator iterator = new TwoPhaseIterator(values) {
ByteArrayDataInput in = new ByteArrayDataInput();
BytesRef otherFrom = new BytesRef(16);
BytesRef otherTo = new BytesRef(16);
@Override
public boolean matches() throws IOException {
BytesRef encodedRanges = values.binaryValue();
in.reset(encodedRanges.bytes, encodedRanges.offset, encodedRanges.length);
int numRanges = in.readVInt();
for (int i = 0; i < numRanges; i++) {
otherFrom.length = in.readVInt();
otherFrom.bytes = encodedRanges.bytes;
otherFrom.offset = in.getPosition();
in.skipBytes(otherFrom.length);
otherTo.length = in.readVInt();
otherTo.bytes = encodedRanges.bytes;
otherTo.offset = in.getPosition();
in.skipBytes(otherTo.length);
if (queryType.matches(from, to, otherFrom, otherTo)) {
return true;
}
}
return false;
}
@Override
public float matchCost() {
return 4; // at most 4 comparisons
}
};
return new ConstantScoreScorer(this, score(), iterator);
}
};
}
@Override
public String toString(String field) {
return "BinaryDocValuesRangeQuery(fieldName=" + field + ",from=" + originalFrom + ",to=" + originalTo + ")";
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
BinaryDocValuesRangeQuery that = (BinaryDocValuesRangeQuery) o;
return Objects.equals(fieldName, that.fieldName) &&
queryType == that.queryType &&
Objects.equals(from, that.from) &&
Objects.equals(to, that.to);
}
@Override
public int hashCode() {
return Objects.hash(getClass(), fieldName, queryType, from, to);
}
public enum QueryType {
INTERSECTS {
@Override
boolean matches(BytesRef from, BytesRef to, BytesRef otherFrom, BytesRef otherTo) {
// part of the other range must touch this range
// this: |---------------|
// other: |------|
return from.compareTo(otherTo) <= 0 && to.compareTo(otherFrom) >= 0;
}
}, WITHIN {
@Override
boolean matches(BytesRef from, BytesRef to, BytesRef otherFrom, BytesRef otherTo) {
// other range must entirely lie within this range
// this: |---------------|
// other: |------|
return from.compareTo(otherFrom) <= 0 && to.compareTo(otherTo) >= 0;
}
}, CONTAINS {
@Override
boolean matches(BytesRef from, BytesRef to, BytesRef otherFrom, BytesRef otherTo) {
// the other range must fully contain this range
// this: |------|
// other: |---------------|
return from.compareTo(otherFrom) >= 0 && to.compareTo(otherTo) <= 0;
}
}, CROSSES {
@Override
boolean matches(BytesRef from, BytesRef to, BytesRef otherFrom, BytesRef otherTo) {
// intersects this range, but the other range is not entirely within it:
return (from.compareTo(otherTo) > 0 || to.compareTo(otherFrom) < 0) == false &&
(from.compareTo(otherFrom) <= 0 && to.compareTo(otherTo) >= 0) == false;
}
};
abstract boolean matches(BytesRef from, BytesRef to, BytesRef otherFrom, BytesRef otherTo);
}
}
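The matches() loop above fixes the layout of each document's binary value: a vInt range count, then each range's
from and to values as length-prefixed byte blocks over the shared backing array. Below is a minimal encoder sketch
consistent with that decoder; it is an assumption based on this class, not code from this commit, and the method
name and buffer size are illustrative:

import org.apache.lucene.store.ByteArrayDataOutput;
import org.apache.lucene.util.BytesRef;
import java.io.IOException;
import java.util.Arrays;

static byte[] encodeRanges(BytesRef[] froms, BytesRef[] tos) throws IOException {
    byte[] buf = new byte[4096];        // assumed large enough for this sketch
    ByteArrayDataOutput out = new ByteArrayDataOutput(buf);
    out.writeVInt(froms.length);        // numRanges, read back via in.readVInt()
    for (int i = 0; i < froms.length; i++) {
        out.writeVInt(froms[i].length); // from: length prefix, then the bytes
        out.writeBytes(froms[i].bytes, froms[i].offset, froms[i].length);
        out.writeVInt(tos[i].length);   // to: length prefix, then the bytes
        out.writeBytes(tos[i].bytes, tos[i].offset, tos[i].length);
    }
    return Arrays.copyOf(buf, out.getPosition());
}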

View File

@ -1,282 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.lucene.queryparser.classic;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.search.MultiTermQuery;
import org.elasticsearch.common.unit.Fuzziness;
import org.joda.time.DateTimeZone;
import java.util.Map;
/**
* Encapsulates settings that affect query_string parsing via {@link MapperQueryParser}
*/
public class QueryParserSettings {
private final String queryString;
private String defaultField;
private Map<String, Float> fieldsAndWeights;
private QueryParser.Operator defaultOperator;
private Analyzer analyzer;
private boolean forceAnalyzer;
private Analyzer quoteAnalyzer;
private boolean forceQuoteAnalyzer;
private String quoteFieldSuffix;
private boolean autoGeneratePhraseQueries;
private boolean allowLeadingWildcard;
private boolean analyzeWildcard;
private boolean enablePositionIncrements;
private Fuzziness fuzziness;
private int fuzzyPrefixLength;
private int fuzzyMaxExpansions;
private MultiTermQuery.RewriteMethod fuzzyRewriteMethod;
private int phraseSlop;
private boolean useDisMax;
private float tieBreaker;
private MultiTermQuery.RewriteMethod rewriteMethod;
private boolean lenient;
private DateTimeZone timeZone;
/** To limit effort spent determinizing regexp queries. */
private int maxDeterminizedStates;
private boolean splitOnWhitespace;
public QueryParserSettings(String queryString) {
this.queryString = queryString;
}
public String queryString() {
return queryString;
}
public String defaultField() {
return defaultField;
}
public void defaultField(String defaultField) {
this.defaultField = defaultField;
}
public Map<String, Float> fieldsAndWeights() {
return fieldsAndWeights;
}
public void fieldsAndWeights(Map<String, Float> fieldsAndWeights) {
this.fieldsAndWeights = fieldsAndWeights;
}
public QueryParser.Operator defaultOperator() {
return defaultOperator;
}
public void defaultOperator(QueryParser.Operator defaultOperator) {
this.defaultOperator = defaultOperator;
}
public boolean autoGeneratePhraseQueries() {
return autoGeneratePhraseQueries;
}
public void autoGeneratePhraseQueries(boolean autoGeneratePhraseQueries) {
this.autoGeneratePhraseQueries = autoGeneratePhraseQueries;
}
public int maxDeterminizedStates() {
return maxDeterminizedStates;
}
public void maxDeterminizedStates(int maxDeterminizedStates) {
this.maxDeterminizedStates = maxDeterminizedStates;
}
public boolean allowLeadingWildcard() {
return allowLeadingWildcard;
}
public void allowLeadingWildcard(boolean allowLeadingWildcard) {
this.allowLeadingWildcard = allowLeadingWildcard;
}
public boolean enablePositionIncrements() {
return enablePositionIncrements;
}
public void enablePositionIncrements(boolean enablePositionIncrements) {
this.enablePositionIncrements = enablePositionIncrements;
}
public int phraseSlop() {
return phraseSlop;
}
public void phraseSlop(int phraseSlop) {
this.phraseSlop = phraseSlop;
}
public int fuzzyPrefixLength() {
return fuzzyPrefixLength;
}
public void fuzzyPrefixLength(int fuzzyPrefixLength) {
this.fuzzyPrefixLength = fuzzyPrefixLength;
}
public int fuzzyMaxExpansions() {
return fuzzyMaxExpansions;
}
public void fuzzyMaxExpansions(int fuzzyMaxExpansions) {
this.fuzzyMaxExpansions = fuzzyMaxExpansions;
}
public MultiTermQuery.RewriteMethod fuzzyRewriteMethod() {
return fuzzyRewriteMethod;
}
public void fuzzyRewriteMethod(MultiTermQuery.RewriteMethod fuzzyRewriteMethod) {
this.fuzzyRewriteMethod = fuzzyRewriteMethod;
}
public void defaultAnalyzer(Analyzer analyzer) {
this.analyzer = analyzer;
this.forceAnalyzer = false;
}
public void forceAnalyzer(Analyzer analyzer) {
this.analyzer = analyzer;
this.forceAnalyzer = true;
}
public Analyzer analyzer() {
return analyzer;
}
public boolean forceAnalyzer() {
return forceAnalyzer;
}
public void defaultQuoteAnalyzer(Analyzer quoteAnalyzer) {
this.quoteAnalyzer = quoteAnalyzer;
this.forceQuoteAnalyzer = false;
}
public void forceQuoteAnalyzer(Analyzer quoteAnalyzer) {
this.quoteAnalyzer = quoteAnalyzer;
this.forceQuoteAnalyzer = true;
}
public Analyzer quoteAnalyzer() {
return quoteAnalyzer;
}
public boolean forceQuoteAnalyzer() {
return forceQuoteAnalyzer;
}
public boolean analyzeWildcard() {
return this.analyzeWildcard;
}
public void analyzeWildcard(boolean analyzeWildcard) {
this.analyzeWildcard = analyzeWildcard;
}
public MultiTermQuery.RewriteMethod rewriteMethod() {
return this.rewriteMethod;
}
public void rewriteMethod(MultiTermQuery.RewriteMethod rewriteMethod) {
this.rewriteMethod = rewriteMethod;
}
public void quoteFieldSuffix(String quoteFieldSuffix) {
this.quoteFieldSuffix = quoteFieldSuffix;
}
public String quoteFieldSuffix() {
return this.quoteFieldSuffix;
}
public void lenient(boolean lenient) {
this.lenient = lenient;
}
public boolean lenient() {
return this.lenient;
}
public float tieBreaker() {
return tieBreaker;
}
public void tieBreaker(float tieBreaker) {
this.tieBreaker = tieBreaker;
}
public boolean useDisMax() {
return useDisMax;
}
public void useDisMax(boolean useDisMax) {
this.useDisMax = useDisMax;
}
public void timeZone(DateTimeZone timeZone) {
this.timeZone = timeZone;
}
public DateTimeZone timeZone() {
return this.timeZone;
}
public void fuzziness(Fuzziness fuzziness) {
this.fuzziness = fuzziness;
}
public Fuzziness fuzziness() {
return fuzziness;
}
public void splitOnWhitespace(boolean value) {
this.splitOnWhitespace = value;
}
public boolean splitOnWhitespace() {
return splitOnWhitespace;
}
}

View File

@ -17,27 +17,24 @@
* under the License.
*/
package org.elasticsearch.action.fieldstats;
package org.apache.lucene.queryparser.classic;
import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.search.Query;
public class FieldStatsAction extends Action<FieldStatsRequest, FieldStatsResponse, FieldStatsRequestBuilder> {
public static final FieldStatsAction INSTANCE = new FieldStatsAction();
public static final String NAME = "indices:data/read/field_stats";
private FieldStatsAction() {
super(NAME);
/**
* This class is just a workaround to make {@link QueryParser#handleBareFuzzy(String, Token, String)} accessible to subclasses.
* It is needed for {@link QueryParser}s that need to override the parsing of the slop in a fuzzy query (e.g. word<b>~2</b>, word<b>~</b>).
*
* TODO: We should maybe rewrite this with the flexible query parser which matches the same syntax with more freedom.
*/
public class XQueryParser extends QueryParser {
public XQueryParser(String f, Analyzer a) {
super(f, a);
}
@Override
public FieldStatsResponse newResponse() {
return new FieldStatsResponse();
}
@Override
public FieldStatsRequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new FieldStatsRequestBuilder(client, this);
protected Query handleBareFuzzy(String field, Token fuzzySlop, String termImage) throws ParseException {
return super.handleBareFuzzy(field, fuzzySlop, termImage);
}
}
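Since XQueryParser exists solely to expose handleBareFuzzy, a subclass can now intercept the slop parsing. A hedged
sketch of such a subclass follows; the class name and the idea of adjusting the slop are illustrative, not part of
this commit:

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.Token;
import org.apache.lucene.queryparser.classic.XQueryParser;
import org.apache.lucene.search.Query;

public class CustomSlopQueryParser extends XQueryParser {
    public CustomSlopQueryParser(String field, Analyzer analyzer) {
        super(field, analyzer);
    }

    @Override
    protected Query handleBareFuzzy(String field, Token fuzzySlop, String termImage) throws ParseException {
        // inspect or rewrite fuzzySlop here (e.g. treat word~ as word~1) before delegating
        return super.handleBareFuzzy(field, fuzzySlop, termImage);
    }
}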

View File

@ -40,7 +40,7 @@ public final class CollapseTopFieldDocs extends TopFieldDocs {
/** The collapse value for each top doc */
public final Object[] collapseValues;
public CollapseTopFieldDocs(String field, int totalHits, ScoreDoc[] scoreDocs,
public CollapseTopFieldDocs(String field, long totalHits, ScoreDoc[] scoreDocs,
SortField[] sortFields, Object[] values, float maxScore) {
super(totalHits, scoreDocs, sortFields, maxScore);
this.field = field;
@ -170,7 +170,7 @@ public final class CollapseTopFieldDocs extends TopFieldDocs {
}
final PriorityQueue<ShardRef> queue = new MergeSortQueue(sort, shardHits);
int totalHitCount = 0;
long totalHitCount = 0;
int availHitCount = 0;
float maxScore = Float.MIN_VALUE;
for (int shardIDX = 0; shardIDX < shardHits.length; shardIDX++) {

View File

@ -84,6 +84,8 @@ public class Version implements Comparable<Version> {
public static final Version V_5_4_3 = new Version(V_5_4_3_ID, org.apache.lucene.util.Version.LUCENE_6_5_1);
public static final int V_5_5_0_ID = 5050099;
public static final Version V_5_5_0 = new Version(V_5_5_0_ID, org.apache.lucene.util.Version.LUCENE_6_6_0);
public static final int V_5_5_1_ID = 5050199;
public static final Version V_5_5_1 = new Version(V_5_5_1_ID, org.apache.lucene.util.Version.LUCENE_6_6_0);
public static final int V_5_6_0_ID = 5060099;
public static final Version V_5_6_0 = new Version(V_5_6_0_ID, org.apache.lucene.util.Version.LUCENE_6_6_0);
public static final int V_6_0_0_alpha1_ID = 6000001;
@ -92,10 +94,10 @@ public class Version implements Comparable<Version> {
public static final int V_6_0_0_alpha2_ID = 6000002;
public static final Version V_6_0_0_alpha2 =
new Version(V_6_0_0_alpha2_ID, org.apache.lucene.util.Version.LUCENE_7_0_0);
public static final int V_6_0_0_alpha3_ID = 6000003;
public static final Version V_6_0_0_alpha3 =
new Version(V_6_0_0_alpha3_ID, org.apache.lucene.util.Version.LUCENE_7_0_0);
public static final Version CURRENT = V_6_0_0_alpha3;
public static final int V_6_0_0_beta1_ID = 6000026;
public static final Version V_6_0_0_beta1 =
new Version(V_6_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0);
public static final Version CURRENT = V_6_0_0_beta1;
// unreleased versions must be added to the above list with the suffix _UNRELEASED (with the exception of CURRENT)
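// A worked example of the version-id scheme, inferred from the constants above:
// id = major * 1_000_000 + minor * 10_000 + revision * 100 + build, where a
// build of 99 marks a release and smaller values mark pre-releases, e.g.
//   V_5_5_1_ID       = 5_050_199 -> 5.5.1 (release)
//   V_6_0_0_beta1_ID = 6_000_026 -> 6.0.0-beta1 (pre-release)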
@ -110,14 +112,16 @@ public class Version implements Comparable<Version> {
public static Version fromId(int id) {
switch (id) {
case V_6_0_0_alpha3_ID:
return V_6_0_0_alpha3;
case V_6_0_0_beta1_ID:
return V_6_0_0_beta1;
case V_6_0_0_alpha2_ID:
return V_6_0_0_alpha2;
case V_6_0_0_alpha1_ID:
return V_6_0_0_alpha1;
case V_5_6_0_ID:
return V_5_6_0;
case V_5_5_1_ID:
return V_5_5_1;
case V_5_5_0_ID:
return V_5_5_0;
case V_5_4_3_ID:
@ -304,8 +308,8 @@ public class Version implements Comparable<Version> {
final int bwcMajor;
final int bwcMinor;
if (major == 6) { // we only specialize for current major here
bwcMajor = Version.V_5_4_0.major;
bwcMinor = Version.V_5_4_0.minor;
bwcMajor = Version.V_5_5_0.major;
bwcMinor = Version.V_5_5_0.minor;
} else if (major > 6) { // all the future versions are compatible with first minor...
bwcMajor = major - 1;
bwcMinor = 0;

View File

@ -156,8 +156,6 @@ import org.elasticsearch.action.explain.TransportExplainAction;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction;
import org.elasticsearch.action.fieldcaps.TransportFieldCapabilitiesAction;
import org.elasticsearch.action.fieldcaps.TransportFieldCapabilitiesIndexAction;
import org.elasticsearch.action.fieldstats.FieldStatsAction;
import org.elasticsearch.action.fieldstats.TransportFieldStatsAction;
import org.elasticsearch.action.get.GetAction;
import org.elasticsearch.action.get.MultiGetAction;
import org.elasticsearch.action.get.TransportGetAction;
@ -213,7 +211,6 @@ import org.elasticsearch.plugins.ActionPlugin.ActionHandler;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestHandler;
import org.elasticsearch.rest.action.RestFieldCapabilitiesAction;
import org.elasticsearch.rest.action.RestFieldStatsAction;
import org.elasticsearch.rest.action.RestMainAction;
import org.elasticsearch.rest.action.admin.cluster.RestCancelTasksAction;
import org.elasticsearch.rest.action.admin.cluster.RestClusterAllocationExplainAction;
@ -493,7 +490,6 @@ public class ActionModule extends AbstractModule {
actions.register(GetStoredScriptAction.INSTANCE, TransportGetStoredScriptAction.class);
actions.register(DeleteStoredScriptAction.INSTANCE, TransportDeleteStoredScriptAction.class);
actions.register(FieldStatsAction.INSTANCE, TransportFieldStatsAction.class);
actions.register(FieldCapabilitiesAction.INSTANCE, TransportFieldCapabilitiesAction.class,
TransportFieldCapabilitiesIndexAction.class);
@ -607,7 +603,6 @@ public class ActionModule extends AbstractModule {
registerHandler.accept(new RestPutStoredScriptAction(settings, restController));
registerHandler.accept(new RestDeleteStoredScriptAction(settings, restController));
registerHandler.accept(new RestFieldStatsAction(settings, restController));
registerHandler.accept(new RestFieldCapabilitiesAction(settings, restController));
// Tasks API

View File

@ -139,7 +139,7 @@ public class TransportClusterAllocationExplainAction
foundShard = ui.next();
}
if (foundShard == null) {
throw new IllegalStateException("unable to find any unassigned shards to explain [" + request + "]");
throw new IllegalArgumentException("unable to find any unassigned shards to explain [" + request + "]");
}
} else {
String index = request.getIndex();
@ -151,7 +151,8 @@ public class TransportClusterAllocationExplainAction
DiscoveryNode primaryNode = allocation.nodes().resolveNode(request.getCurrentNode());
// the primary is assigned to a node other than the node specified in the request
if (primaryNode.getId().equals(foundShard.currentNodeId()) == false) {
throw new IllegalStateException("unable to find primary shard assigned to node [" + request.getCurrentNode() + "]");
throw new IllegalArgumentException(
"unable to find primary shard assigned to node [" + request.getCurrentNode() + "]");
}
}
} else {
@ -168,7 +169,7 @@ public class TransportClusterAllocationExplainAction
}
}
if (foundShard == null) {
throw new IllegalStateException("unable to find a replica shard assigned to node [" +
throw new IllegalArgumentException("unable to find a replica shard assigned to node [" +
request.getCurrentNode() + "]");
}
} else {
@ -193,7 +194,7 @@ public class TransportClusterAllocationExplainAction
}
if (foundShard == null) {
throw new IllegalStateException("unable to find any shards to explain [" + request + "] in the routing table");
throw new IllegalArgumentException("unable to find any shards to explain [" + request + "] in the routing table");
}
return foundShard;
}

View File

@ -18,6 +18,7 @@
*/
package org.elasticsearch.action.admin.indices.analyze;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.single.shard.SingleShardRequest;
import org.elasticsearch.common.Strings;
@ -59,6 +60,8 @@ public class AnalyzeRequest extends SingleShardRequest<AnalyzeRequest> {
private String[] attributes = Strings.EMPTY_ARRAY;
private String normalizer;
public static class NameOrDefinition implements Writeable {
// exactly one of these two members is not null
public final String name;
@ -202,12 +205,27 @@ public class AnalyzeRequest extends SingleShardRequest<AnalyzeRequest> {
return this.attributes;
}
public String normalizer() {
return this.normalizer;
}
public AnalyzeRequest normalizer(String normalizer) {
this.normalizer = normalizer;
return this;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (text == null || text.length == 0) {
validationException = addValidationError("text is missing", validationException);
}
if ((index == null || index.length() == 0) && normalizer != null) {
validationException = addValidationError("index is required if normalizer is specified", validationException);
}
if (normalizer != null && (tokenizer != null || analyzer != null)) {
validationException = addValidationError("tokenizer/analyze should be null if normalizer is specified", validationException);
}
return validationException;
}
@ -222,6 +240,9 @@ public class AnalyzeRequest extends SingleShardRequest<AnalyzeRequest> {
field = in.readOptionalString();
explain = in.readBoolean();
attributes = in.readStringArray();
if (in.getVersion().onOrAfter(Version.V_6_0_0_beta1)) {
normalizer = in.readOptionalString();
}
}
@Override
@ -235,5 +256,8 @@ public class AnalyzeRequest extends SingleShardRequest<AnalyzeRequest> {
out.writeOptionalString(field);
out.writeBoolean(explain);
out.writeStringArray(attributes);
if (out.getVersion().onOrAfter(Version.V_6_0_0_beta1)) {
out.writeOptionalString(normalizer);
}
}
}

View File

@ -125,4 +125,13 @@ public class AnalyzeRequestBuilder extends SingleShardOperationRequestBuilder<An
request.text(texts);
return this;
}
/**
* Instead of setting the analyzer and tokenizer, sets the normalizer as name
*/
public AnalyzeRequestBuilder setNormalizer(String normalizer) {
request.normalizer(normalizer);
return this;
}
}
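Combined with the validate() rules added to AnalyzeRequest above (an index is required, and analyzer/tokenizer must
be unset when a normalizer is given), usage might look like the following sketch; the index and normalizer names
are illustrative:

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest;

AnalyzeRequest request = new AnalyzeRequest("my_index")      // an index is mandatory for a normalizer
        .text("Some Text To Normalize")
        .normalizer("my_lowercase_normalizer");              // set instead of analyzer/tokenizer
ActionRequestValidationException error = request.validate(); // null when the request is valid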

View File

@ -51,6 +51,7 @@ import org.elasticsearch.index.analysis.CharFilterFactory;
import org.elasticsearch.index.analysis.CustomAnalyzer;
import org.elasticsearch.index.analysis.CustomAnalyzerProvider;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.MultiTermAwareComponent;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.index.analysis.TokenizerFactory;
@ -60,6 +61,7 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.analysis.AnalysisModule;
import org.elasticsearch.indices.analysis.PreBuiltTokenizers;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -178,21 +180,46 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
throw new IllegalArgumentException("failed to find analyzer [" + request.analyzer() + "]");
}
}
} else if (request.tokenizer() != null) {
final IndexSettings indexSettings = indexAnalyzers == null ? null : indexAnalyzers.getIndexSettings();
Tuple<String, TokenizerFactory> tokenizerFactory = parseTokenizerFactory(request, indexAnalyzers,
analysisRegistry, environment);
List<CharFilterFactory> charFilterFactoryList = parseCharFilterFactories(request, indexSettings, analysisRegistry, environment);
List<CharFilterFactory> charFilterFactoryList =
parseCharFilterFactories(request, indexSettings, analysisRegistry, environment, false);
List<TokenFilterFactory> tokenFilterFactoryList = parseTokenFilterFactories(request, indexSettings, analysisRegistry,
environment, tokenizerFactory, charFilterFactoryList);
environment, tokenizerFactory, charFilterFactoryList, false);
analyzer = new CustomAnalyzer(tokenizerFactory.v1(), tokenizerFactory.v2(),
charFilterFactoryList.toArray(new CharFilterFactory[charFilterFactoryList.size()]),
tokenFilterFactoryList.toArray(new TokenFilterFactory[tokenFilterFactoryList.size()]));
closeAnalyzer = true;
} else if (request.normalizer() != null) {
// Get normalizer from indexAnalyzers
analyzer = indexAnalyzers.getNormalizer(request.normalizer());
if (analyzer == null) {
throw new IllegalArgumentException("failed to find normalizer under [" + request.normalizer() + "]");
}
} else if (((request.tokenFilters() != null && request.tokenFilters().size() > 0)
|| (request.charFilters() != null && request.charFilters().size() > 0))) {
final IndexSettings indexSettings = indexAnalyzers == null ? null : indexAnalyzers.getIndexSettings();
// custom normalizer: no named normalizer was given, but token filters and/or char filters were,
// and no tokenizer/analyzer is set, so build one from the filters in the request
List<CharFilterFactory> charFilterFactoryList =
parseCharFilterFactories(request, indexSettings, analysisRegistry, environment, true);
final String keywordTokenizerName = "keyword";
TokenizerFactory keywordTokenizerFactory = getTokenizerFactory(analysisRegistry, environment, keywordTokenizerName);
List<TokenFilterFactory> tokenFilterFactoryList =
parseTokenFilterFactories(request, indexSettings, analysisRegistry, environment,
new Tuple<>(keywordTokenizerName, keywordTokenizerFactory), charFilterFactoryList, true);
analyzer = new CustomAnalyzer("keyword_for_normalizer",
keywordTokenizerFactory,
charFilterFactoryList.toArray(new CharFilterFactory[charFilterFactoryList.size()]),
tokenFilterFactoryList.toArray(new TokenFilterFactory[tokenFilterFactoryList.size()]));
closeAnalyzer = true;
} else if (analyzer == null) {
if (indexAnalyzers == null) {
analyzer = analysisRegistry.getAnalyzer("standard");
@ -465,7 +492,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
}
private static List<CharFilterFactory> parseCharFilterFactories(AnalyzeRequest request, IndexSettings indexSettings, AnalysisRegistry analysisRegistry,
Environment environment) throws IOException {
Environment environment, boolean normalizer) throws IOException {
List<CharFilterFactory> charFilterFactoryList = new ArrayList<>();
if (request.charFilters() != null && request.charFilters().size() > 0) {
List<AnalyzeRequest.NameOrDefinition> charFilters = request.charFilters();
@ -506,6 +533,13 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
if (charFilterFactory == null) {
throw new IllegalArgumentException("failed to find char filter under [" + charFilter.name + "]");
}
if (normalizer) {
if (charFilterFactory instanceof MultiTermAwareComponent == false) {
throw new IllegalArgumentException("Custom normalizer may not use char filter ["
+ charFilterFactory.name() + "]");
}
charFilterFactory = (CharFilterFactory) ((MultiTermAwareComponent) charFilterFactory).getMultiTermComponent();
}
charFilterFactoryList.add(charFilterFactory);
}
}
@ -514,7 +548,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
private static List<TokenFilterFactory> parseTokenFilterFactories(AnalyzeRequest request, IndexSettings indexSettings, AnalysisRegistry analysisRegistry,
Environment environment, Tuple<String, TokenizerFactory> tokenizerFactory,
List<CharFilterFactory> charFilterFactoryList) throws IOException {
List<CharFilterFactory> charFilterFactoryList, boolean normalizer) throws IOException {
List<TokenFilterFactory> tokenFilterFactoryList = new ArrayList<>();
if (request.tokenFilters() != null && request.tokenFilters().size() > 0) {
List<AnalyzeRequest.NameOrDefinition> tokenFilters = request.tokenFilters();
@ -561,6 +595,13 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
if (tokenFilterFactory == null) {
throw new IllegalArgumentException("failed to find or create token filter under [" + tokenFilter.name + "]");
}
if (normalizer) {
if (tokenFilterFactory instanceof MultiTermAwareComponent == false) {
throw new IllegalArgumentException("Custom normalizer may not use filter ["
+ tokenFilterFactory.name() + "]");
}
tokenFilterFactory = (TokenFilterFactory) ((MultiTermAwareComponent) tokenFilterFactory).getMultiTermComponent();
}
tokenFilterFactoryList.add(tokenFilterFactory);
}
}
@ -590,12 +631,8 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
} else {
AnalysisModule.AnalysisProvider<TokenizerFactory> tokenizerFactoryFactory;
if (indexAnalzyers == null) {
tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(tokenizer.name);
if (tokenizerFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global tokenizer under [" + tokenizer.name + "]");
}
tokenizerFactory = getTokenizerFactory(analysisRegistry, environment, tokenizer.name);
name = tokenizer.name;
tokenizerFactory = tokenizerFactoryFactory.get(environment, tokenizer.name);
} else {
tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(tokenizer.name, indexAnalzyers.getIndexSettings());
if (tokenizerFactoryFactory == null) {
@ -610,6 +647,17 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
return new Tuple<>(name, tokenizerFactory);
}
private static TokenizerFactory getTokenizerFactory(AnalysisRegistry analysisRegistry, Environment environment, String name) throws IOException {
AnalysisModule.AnalysisProvider<TokenizerFactory> tokenizerFactoryFactory;
TokenizerFactory tokenizerFactory;
tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(name);
if (tokenizerFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global tokenizer under [" + name + "]");
}
tokenizerFactory = tokenizerFactoryFactory.get(environment, name);
return tokenizerFactory;
}
private static IndexSettings getNaIndexSettings(Settings settings) {
IndexMetaData metaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(settings).build();
return new IndexSettings(metaData, Settings.EMPTY);
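The custom-normalizer branch above also builds a normalizer on the fly when only filters are supplied, provided
every factory is a MultiTermAwareComponent. A hedged sketch of a request that would take that branch; the index
name and text are illustrative, and lowercase is assumed to be multi-term aware:

import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest;

AnalyzeRequest request = new AnalyzeRequest("my_index")
        .text("Grüße")
        .addTokenFilter("lowercase"); // no analyzer/tokenizer/normalizer, so the keyword tokenizer is used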

View File

@ -19,9 +19,7 @@
package org.elasticsearch.action.bulk;
import org.elasticsearch.Version;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
@ -78,37 +76,12 @@ public class BulkItemRequest implements Streamable {
if (in.readBoolean()) {
primaryResponse = BulkItemResponse.readBulkItem(in);
}
if (in.getVersion().before(Version.V_6_0_0_alpha1)) { // TODO remove once backported
boolean ignoreOnReplica = in.readBoolean();
if (ignoreOnReplica == false && primaryResponse != null) {
assert primaryResponse.isFailed() == false : "expected no failure on the primary response";
}
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(id);
if (out.getVersion().before(Version.V_6_0_0_alpha1)) { // TODO remove once backported
// old nodes expect updated version and version type on the request
if (primaryResponse != null) {
request.version(primaryResponse.getVersion());
request.versionType(request.versionType().versionTypeForReplicationAndRecovery());
DocWriteRequest.writeDocumentRequest(out, request);
} else {
DocWriteRequest.writeDocumentRequest(out, request);
}
} else {
DocWriteRequest.writeDocumentRequest(out, request);
}
DocWriteRequest.writeDocumentRequest(out, request);
out.writeOptionalStreamable(primaryResponse);
if (out.getVersion().before(Version.V_6_0_0_alpha1)) { // TODO remove once backported
if (primaryResponse != null) {
out.writeBoolean(primaryResponse.isFailed()
|| primaryResponse.getResponse().getResult() == DocWriteResponse.Result.NOOP);
} else {
out.writeBoolean(false);
}
}
}
}

View File

@ -25,6 +25,7 @@ import org.apache.lucene.util.SparseFixedBitSet;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.ResourceAlreadyExistsException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.RoutingMissingException;
@ -289,13 +290,11 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
case CREATE:
case INDEX:
IndexRequest indexRequest = (IndexRequest) docWriteRequest;
MappingMetaData mappingMd = null;
final IndexMetaData indexMetaData = metaData.index(concreteIndex);
if (indexMetaData != null) {
mappingMd = indexMetaData.mappingOrDefault(indexRequest.type());
}
MappingMetaData mappingMd = indexMetaData.mappingOrDefault(indexRequest.type());
Version indexCreated = indexMetaData.getCreationVersion();
indexRequest.resolveRouting(metaData);
indexRequest.process(mappingMd, concreteIndex.getName());
indexRequest.process(indexCreated, mappingMd, concreteIndex.getName());
break;
case UPDATE:
TransportUpdateAction.resolveAndValidateRouting(metaData, concreteIndex.getName(), (UpdateRequest) docWriteRequest);

View File

@ -350,7 +350,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
case UPDATED:
IndexRequest indexRequest = translate.action();
MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type());
indexRequest.process(mappingMd, concreteIndex);
indexRequest.process(metaData.getCreationVersion(), mappingMd, concreteIndex);
result = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdater);
break;
case DELETED:

View File

@ -20,7 +20,6 @@
package org.elasticsearch.action.delete;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;
@ -37,8 +36,6 @@ import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpect
*/
public class DeleteResponse extends DocWriteResponse {
private static final String FOUND = "found";
public DeleteResponse() {
}
@ -64,13 +61,6 @@ public class DeleteResponse extends DocWriteResponse {
return builder.append("]").toString();
}
@Override
public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(FOUND, result == Result.DELETED);
super.innerToXContent(builder, params);
return builder;
}
public static DeleteResponse fromXContent(XContentParser parser) throws IOException {
ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
@ -85,16 +75,7 @@ public class DeleteResponse extends DocWriteResponse {
* Parse the current token and update the parsing context appropriately.
*/
public static void parseXContentFields(XContentParser parser, Builder context) throws IOException {
XContentParser.Token token = parser.currentToken();
String currentFieldName = parser.currentName();
if (FOUND.equals(currentFieldName)) {
if (token.isValue()) {
context.setFound(parser.booleanValue());
}
} else {
DocWriteResponse.parseInnerToXContent(parser, context);
}
DocWriteResponse.parseInnerToXContent(parser, context);
}
/**
@ -104,15 +85,10 @@ public class DeleteResponse extends DocWriteResponse {
*/
public static class Builder extends DocWriteResponse.Builder {
private boolean found = false;
public void setFound(boolean found) {
this.found = found;
}
@Override
public DeleteResponse build() {
DeleteResponse deleteResponse = new DeleteResponse(shardId, type, id, seqNo, primaryTerm, version, found);
DeleteResponse deleteResponse = new DeleteResponse(shardId, type, id, seqNo, primaryTerm, version,
result == Result.DELETED);
deleteResponse.setForcedRefresh(forcedRefresh);
if (shardInfo != null) {
deleteResponse.setShardInfo(shardInfo);

View File

@ -1,789 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.fieldstats;
import org.apache.lucene.document.InetAddressPoint;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.StringHelper;
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.joda.Joda;
import org.elasticsearch.common.network.InetAddresses;
import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.net.InetAddress;
import java.util.Objects;
public abstract class FieldStats<T> implements Writeable, ToXContent {
private final byte type;
private long maxDoc;
private long docCount;
private long sumDocFreq;
private long sumTotalTermFreq;
private boolean isSearchable;
private boolean isAggregatable;
private boolean hasMinMax;
protected T minValue;
protected T maxValue;
/**
* Builds a FieldStats where min and max value are not available for the field.
* @param type The native type of this FieldStats
* @param maxDoc Max number of docs
* @param docCount the number of documents that have at least one term for this field,
* or -1 if this information isn't available for this field.
* @param sumDocFreq the sum of {@link TermsEnum#docFreq()} for all terms in this field,
* or -1 if this information isn't available for this field.
* @param sumTotalTermFreq the sum of {@link TermsEnum#totalTermFreq} for all terms in this field,
* or -1 if this measure isn't available for this field.
* @param isSearchable true if this field is searchable
* @param isAggregatable true if this field is aggregatable
*/
FieldStats(byte type, long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable) {
this.type = type;
this.maxDoc = maxDoc;
this.docCount = docCount;
this.sumDocFreq = sumDocFreq;
this.sumTotalTermFreq = sumTotalTermFreq;
this.isSearchable = isSearchable;
this.isAggregatable = isAggregatable;
this.hasMinMax = false;
}
/**
* Builds a FieldStats with min and max value for the field.
* @param type The native type of this FieldStats
* @param maxDoc Max number of docs
* @param docCount the number of documents that have at least one term for this field,
* or -1 if this information isn't available for this field.
* @param sumDocFreq the sum of {@link TermsEnum#docFreq()} for all terms in this field,
* or -1 if this information isn't available for this field.
* @param sumTotalTermFreq the sum of {@link TermsEnum#totalTermFreq} for all terms in this field,
* or -1 if this measure isn't available for this field.
* @param isSearchable true if this field is searchable
* @param isAggregatable true if this field is aggregatable
* @param minValue the minimum value indexed in this field
* @param maxValue the maximum value indexed in this field
*/
FieldStats(byte type,
long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable, T minValue, T maxValue) {
Objects.requireNonNull(minValue, "minValue must not be null");
Objects.requireNonNull(maxValue, "maxValue must not be null");
this.type = type;
this.maxDoc = maxDoc;
this.docCount = docCount;
this.sumDocFreq = sumDocFreq;
this.sumTotalTermFreq = sumTotalTermFreq;
this.isSearchable = isSearchable;
this.isAggregatable = isAggregatable;
this.hasMinMax = true;
this.minValue = minValue;
this.maxValue = maxValue;
}
byte getType() {
return this.type;
}
public String getDisplayType() {
switch (type) {
case 0:
return "integer";
case 1:
return "float";
case 2:
return "date";
case 3:
return "string";
case 4:
return "ip";
case 5:
return "geo_point";
default:
throw new IllegalArgumentException("Unknown type.");
}
}
/**
* @return true if min/max information is available for this field
*/
public boolean hasMinMax() {
return hasMinMax;
}
/**
* @return the total number of documents.
*
* Note that documents marked as deleted that haven't yet been merged away aren't taken into account.
*/
public long getMaxDoc() {
return maxDoc;
}
/**
* @return the number of documents that have at least one term for this field,
* or -1 if this measurement isn't available.
*
* Note that documents marked as deleted that haven't yet been merged away aren't taken into account.
*/
public long getDocCount() {
return docCount;
}
/**
* @return The percentage of documents that have at least one value for this field.
*
* This is a derived statistic and is based on: 'doc_count / max_doc'
*/
public int getDensity() {
if (docCount < 0 || maxDoc <= 0) {
return -1;
}
return (int) (docCount * 100 / maxDoc);
}
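// Worked example (illustrative, not part of the original source): with docCount = 450
// and maxDoc = 1000, getDensity() returns (int) (450 * 100 / 1000) = 45, i.e. 45%.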
/**
* @return the sum of each term's document frequency in this field, or -1 if this measurement isn't available.
* Document frequency is the number of documents containing a particular term.
*
* Note that documents marked as deleted that haven't yet been merged away aren't taken into account.
*/
public long getSumDocFreq() {
return sumDocFreq;
}
/**
* @return the sum of the term frequencies of all terms in this field across all documents,
* or -1 if this measurement
* isn't available. Term frequency is the total number of occurrences of a term in a particular document and field.
*
* Note that documents marked as deleted that haven't yet been merged away aren't taken into account.
*/
public long getSumTotalTermFreq() {
return sumTotalTermFreq;
}
/**
* @return <code>true</code> if any of the instances of the field name is searchable.
*/
public boolean isSearchable() {
return isSearchable;
}
/**
* @return <code>true</code> if any of the instances of the field name is aggregatable.
*/
public boolean isAggregatable() {
return isAggregatable;
}
/**
* @return the lowest value in the field.
*
* Note that documents marked as deleted that haven't yet been merged away aren't taken into account.
*/
public T getMinValue() {
return minValue;
}
/**
* @return the highest value in the field.
*
* Note that documents marked as deleted that haven't yet been merged away aren't taken into account.
*/
public T getMaxValue() {
return maxValue;
}
/**
* @return the lowest value in the field represented as a string.
*
* Note that documents marked as deleted that haven't yet been merged away aren't taken into account.
*/
public abstract String getMinValueAsString();
/**
* @return the highest value in the field represented as a string.
*
* Note that documents marked as deleted that haven't yet been merged away aren't taken into account.
*/
public abstract String getMaxValueAsString();
/**
* @param value The string to be parsed
* @param optionalFormat A string describing how to parse the specified value. Whether this parameter is supported
* depends on the implementation. If optionalFormat is specified and the implementation
* doesn't support it an {@link UnsupportedOperationException} is thrown
*/
protected abstract T valueOf(String value, String optionalFormat);
/**
* Accumulates the provided stats into this stats instance.
*/
public final void accumulate(FieldStats other) {
this.maxDoc += other.maxDoc;
if (other.docCount == -1) {
this.docCount = -1;
} else if (this.docCount != -1) {
this.docCount += other.docCount;
}
if (other.sumDocFreq == -1) {
this.sumDocFreq = -1;
} else if (this.sumDocFreq != -1) {
this.sumDocFreq += other.sumDocFreq;
}
if (other.sumTotalTermFreq == -1) {
this.sumTotalTermFreq = -1;
} else if (this.sumTotalTermFreq != -1) {
this.sumTotalTermFreq += other.sumTotalTermFreq;
}
isSearchable |= other.isSearchable;
isAggregatable |= other.isAggregatable;
assert type == other.getType();
if (hasMinMax && other.hasMinMax) {
updateMinMax((T) other.minValue, (T) other.maxValue);
} else {
hasMinMax = false;
minValue = null;
maxValue = null;
}
}
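// Merge semantics (illustrative, not part of the original source): accumulating docCounts
// of 80 and 150 yields 230, but if either side reports -1 the merged docCount becomes -1;
// likewise, min/max survive only when both sides carry them, otherwise hasMinMax is cleared.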
protected void updateMinMax(T min, T max) {
if (compare(minValue, min) > 0) {
minValue = min;
}
if (compare(maxValue, max) < 0) {
maxValue = max;
}
}
protected abstract int compare(T o1, T o2);
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(TYPE_FIELD, getDisplayType());
builder.field(MAX_DOC_FIELD, maxDoc);
builder.field(DOC_COUNT_FIELD, docCount);
builder.field(DENSITY_FIELD, getDensity());
builder.field(SUM_DOC_FREQ_FIELD, sumDocFreq);
builder.field(SUM_TOTAL_TERM_FREQ_FIELD, sumTotalTermFreq);
builder.field(SEARCHABLE_FIELD, isSearchable);
builder.field(AGGREGATABLE_FIELD, isAggregatable);
if (hasMinMax) {
toInnerXContent(builder);
}
builder.endObject();
return builder;
}
protected void toInnerXContent(XContentBuilder builder) throws IOException {
builder.field(MIN_VALUE_FIELD, getMinValue());
builder.field(MIN_VALUE_AS_STRING_FIELD, getMinValueAsString());
builder.field(MAX_VALUE_FIELD, getMaxValue());
builder.field(MAX_VALUE_AS_STRING_FIELD, getMaxValueAsString());
}
@Override
public final void writeTo(StreamOutput out) throws IOException {
out.writeByte(type);
out.writeLong(maxDoc);
out.writeLong(docCount);
out.writeLong(sumDocFreq);
out.writeLong(sumTotalTermFreq);
out.writeBoolean(isSearchable);
out.writeBoolean(isAggregatable);
if (out.getVersion().onOrAfter(Version.V_5_2_0)) {
out.writeBoolean(hasMinMax);
if (hasMinMax) {
writeMinMax(out);
}
} else {
assert hasMinMax : "cannot serialize null min/max fieldstats in a mixed-cluster " +
"with pre-" + Version.V_5_2_0 + " nodes, remote version [" + out.getVersion() + "]";
writeMinMax(out);
}
}
protected abstract void writeMinMax(StreamOutput out) throws IOException;
/**
* @return <code>true</code> if this instance matches with the provided index constraint,
* otherwise <code>false</code> is returned
*/
public boolean match(IndexConstraint constraint) {
if (hasMinMax == false) {
return false;
}
int cmp;
T value = valueOf(constraint.getValue(), constraint.getOptionalFormat());
if (constraint.getProperty() == IndexConstraint.Property.MIN) {
cmp = compare(minValue, value);
} else if (constraint.getProperty() == IndexConstraint.Property.MAX) {
cmp = compare(maxValue, value);
} else {
throw new IllegalArgumentException("Unsupported property [" + constraint.getProperty() + "]");
}
switch (constraint.getComparison()) {
case GT:
return cmp > 0;
case GTE:
return cmp >= 0;
case LT:
return cmp < 0;
case LTE:
return cmp <= 0;
default:
throw new IllegalArgumentException("Unsupported comparison [" + constraint.getComparison() + "]");
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
FieldStats<?> that = (FieldStats<?>) o;
if (type != that.type) return false;
if (maxDoc != that.maxDoc) return false;
if (docCount != that.docCount) return false;
if (sumDocFreq != that.sumDocFreq) return false;
if (sumTotalTermFreq != that.sumTotalTermFreq) return false;
if (isSearchable != that.isSearchable) return false;
if (isAggregatable != that.isAggregatable) return false;
if (hasMinMax != that.hasMinMax) return false;
if (hasMinMax == false) {
return true;
}
if (!minValue.equals(that.minValue)) return false;
return maxValue.equals(that.maxValue);
}
@Override
public int hashCode() {
return Objects.hash(type, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable,
hasMinMax, minValue, maxValue);
}
public static class Long extends FieldStats<java.lang.Long> {
public Long(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable) {
super((byte) 0, maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable);
}
public Long(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable,
long minValue, long maxValue) {
super((byte) 0, maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable, minValue, maxValue);
}
@Override
public int compare(java.lang.Long o1, java.lang.Long o2) {
return o1.compareTo(o2);
}
@Override
public void writeMinMax(StreamOutput out) throws IOException {
out.writeLong(minValue);
out.writeLong(maxValue);
}
@Override
public java.lang.Long valueOf(String value, String optionalFormat) {
return java.lang.Long.parseLong(value);
}
@Override
public String getMinValueAsString() {
return java.lang.Long.toString(minValue);
}
@Override
public String getMaxValueAsString() {
return java.lang.Long.toString(maxValue);
}
}
public static class Double extends FieldStats<java.lang.Double> {
public Double(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable) {
super((byte) 1, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable);
}
public Double(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable,
double minValue, double maxValue) {
super((byte) 1, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable,
minValue, maxValue);
}
@Override
public int compare(java.lang.Double o1, java.lang.Double o2) {
return o1.compareTo(o2);
}
@Override
public void writeMinMax(StreamOutput out) throws IOException {
out.writeDouble(minValue);
out.writeDouble(maxValue);
}
@Override
public java.lang.Double valueOf(String value, String optionalFormat) {
if (optionalFormat != null) {
throw new UnsupportedOperationException("custom format isn't supported");
}
return java.lang.Double.parseDouble(value);
}
@Override
public String getMinValueAsString() {
return java.lang.Double.toString(minValue);
}
@Override
public String getMaxValueAsString() {
return java.lang.Double.toString(maxValue);
}
}
public static class Date extends FieldStats<java.lang.Long> {
private FormatDateTimeFormatter formatter;
public Date(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable) {
super((byte) 2, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable);
this.formatter = null;
}
public Date(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable,
FormatDateTimeFormatter formatter,
long minValue, long maxValue) {
super((byte) 2, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable,
minValue, maxValue);
this.formatter = formatter;
}
@Override
public int compare(java.lang.Long o1, java.lang.Long o2) {
return o1.compareTo(o2);
}
@Override
public void writeMinMax(StreamOutput out) throws IOException {
out.writeString(formatter.format());
out.writeLong(minValue);
out.writeLong(maxValue);
}
@Override
public java.lang.Long valueOf(String value, String fmt) {
FormatDateTimeFormatter f = formatter;
if (fmt != null) {
f = Joda.forPattern(fmt);
}
return f.parser().parseDateTime(value).getMillis();
}
@Override
public String getMinValueAsString() {
return formatter.printer().print(minValue);
}
@Override
public String getMaxValueAsString() {
return formatter.printer().print(maxValue);
}
@Override
public boolean equals(Object o) {
if (!super.equals(o)) return false;
Date that = (Date) o;
return Objects.equals(formatter == null ? null : formatter.format(),
that.formatter == null ? null : that.formatter.format());
}
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + (formatter == null ? 0 : formatter.format().hashCode());
return result;
}
}
public static class Text extends FieldStats<BytesRef> {
public Text(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable) {
super((byte) 3, maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable);
}
public Text(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable,
BytesRef minValue, BytesRef maxValue) {
super((byte) 3, maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable,
minValue, maxValue);
}
@Override
public int compare(BytesRef o1, BytesRef o2) {
return o1.compareTo(o2);
}
@Override
public void writeMinMax(StreamOutput out) throws IOException {
out.writeBytesRef(minValue);
out.writeBytesRef(maxValue);
}
@Override
protected BytesRef valueOf(String value, String optionalFormat) {
if (optionalFormat != null) {
throw new UnsupportedOperationException("custom format isn't supported");
}
return new BytesRef(value);
}
@Override
public String getMinValueAsString() {
return minValue.utf8ToString();
}
@Override
public String getMaxValueAsString() {
return maxValue.utf8ToString();
}
@Override
protected void toInnerXContent(XContentBuilder builder) throws IOException {
builder.field(MIN_VALUE_FIELD, getMinValueAsString());
builder.field(MAX_VALUE_FIELD, getMaxValueAsString());
}
}
public static class Ip extends FieldStats<InetAddress> {
public Ip(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable) {
super((byte) 4, maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable);
}
public Ip(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable,
InetAddress minValue, InetAddress maxValue) {
super((byte) 4, maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable,
minValue, maxValue);
}
@Override
public int compare(InetAddress o1, InetAddress o2) {
byte[] b1 = InetAddressPoint.encode(o1);
byte[] b2 = InetAddressPoint.encode(o2);
return StringHelper.compare(b1.length, b1, 0, b2, 0);
}
@Override
public void writeMinMax(StreamOutput out) throws IOException {
byte[] b1 = InetAddressPoint.encode(minValue);
byte[] b2 = InetAddressPoint.encode(maxValue);
out.writeByte((byte) b1.length);
out.writeBytes(b1);
out.writeByte((byte) b2.length);
out.writeBytes(b2);
}
@Override
public InetAddress valueOf(String value, String fmt) {
return InetAddresses.forString(value);
}
@Override
public String getMinValueAsString() {
return NetworkAddress.format(minValue);
}
@Override
public String getMaxValueAsString() {
return NetworkAddress.format(maxValue);
}
}
public static class GeoPoint extends FieldStats<org.elasticsearch.common.geo.GeoPoint> {
public GeoPoint(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable) {
super((byte) 5, maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable);
}
public GeoPoint(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable,
org.elasticsearch.common.geo.GeoPoint minValue, org.elasticsearch.common.geo.GeoPoint maxValue) {
super((byte) 5, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable,
minValue, maxValue);
}
@Override
public org.elasticsearch.common.geo.GeoPoint valueOf(String value, String fmt) {
return org.elasticsearch.common.geo.GeoPoint.parseFromLatLon(value);
}
@Override
protected void updateMinMax(org.elasticsearch.common.geo.GeoPoint min, org.elasticsearch.common.geo.GeoPoint max) {
minValue.reset(Math.min(min.lat(), minValue.lat()), Math.min(min.lon(), minValue.lon()));
maxValue.reset(Math.max(max.lat(), maxValue.lat()), Math.max(max.lon(), maxValue.lon()));
}
@Override
public int compare(org.elasticsearch.common.geo.GeoPoint p1, org.elasticsearch.common.geo.GeoPoint p2) {
throw new IllegalArgumentException("compare is not supported for geo_point field stats");
}
@Override
public void writeMinMax(StreamOutput out) throws IOException {
out.writeDouble(minValue.lat());
out.writeDouble(minValue.lon());
out.writeDouble(maxValue.lat());
out.writeDouble(maxValue.lon());
}
@Override
public String getMinValueAsString() {
return minValue.toString();
}
@Override
public String getMaxValueAsString() {
return maxValue.toString();
}
}
public static FieldStats readFrom(StreamInput in) throws IOException {
byte type = in.readByte();
long maxDoc = in.readLong();
long docCount = in.readLong();
long sumDocFreq = in.readLong();
long sumTotalTermFreq = in.readLong();
boolean isSearchable = in.readBoolean();
boolean isAggregatable = in.readBoolean();
boolean hasMinMax = true;
if (in.getVersion().onOrAfter(Version.V_5_2_0)) {
hasMinMax = in.readBoolean();
}
switch (type) {
case 0:
if (hasMinMax) {
return new Long(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable, in.readLong(), in.readLong());
} else {
return new Long(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable);
}
case 1:
if (hasMinMax) {
return new Double(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable, in.readDouble(), in.readDouble());
} else {
return new Double(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable);
}
case 2:
if (hasMinMax) {
FormatDateTimeFormatter formatter = Joda.forPattern(in.readString());
return new Date(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable, formatter, in.readLong(), in.readLong());
} else {
return new Date(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable);
}
case 3:
if (hasMinMax) {
return new Text(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable, in.readBytesRef(), in.readBytesRef());
} else {
return new Text(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable);
}
case 4: {
if (hasMinMax == false) {
return new Ip(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable);
}
int l1 = in.readByte();
byte[] b1 = new byte[l1];
in.readBytes(b1, 0, l1);
int l2 = in.readByte();
byte[] b2 = new byte[l2];
in.readBytes(b2, 0, l2);
InetAddress min = InetAddressPoint.decode(b1);
InetAddress max = InetAddressPoint.decode(b2);
return new Ip(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable, min, max);
}
case 5: {
if (hasMinMax == false) {
return new GeoPoint(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable);
}
org.elasticsearch.common.geo.GeoPoint min = new org.elasticsearch.common.geo.GeoPoint(in.readDouble(), in.readDouble());
org.elasticsearch.common.geo.GeoPoint max = new org.elasticsearch.common.geo.GeoPoint(in.readDouble(), in.readDouble());
return new GeoPoint(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable, min, max);
}
default:
throw new IllegalArgumentException("Unknown type.");
}
}
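// Wire format recap (descriptive comment, not part of the original source): a type byte,
// four longs, two booleans, and, on Version.V_5_2_0 or later, a hasMinMax flag followed by
// the type-specific min/max payload that each subclass writes and reads symmetrically.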
private static final String TYPE_FIELD = "type";
private static final String MAX_DOC_FIELD = "max_doc";
private static final String DOC_COUNT_FIELD = "doc_count";
private static final String DENSITY_FIELD = "density";
private static final String SUM_DOC_FREQ_FIELD = "sum_doc_freq";
private static final String SUM_TOTAL_TERM_FREQ_FIELD = "sum_total_term_freq";
private static final String SEARCHABLE_FIELD = "searchable";
private static final String AGGREGATABLE_FIELD = "aggregatable";
private static final String MIN_VALUE_FIELD = "min_value";
private static final String MIN_VALUE_AS_STRING_FIELD = "min_value_as_string";
private static final String MAX_VALUE_FIELD = "max_value";
private static final String MAX_VALUE_AS_STRING_FIELD = "max_value_as_string";
}
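
A minimal usage sketch of the class above, using made-up numbers and only the public
constructors and methods shown in this deleted file:

-------------------------------------------------
// Two per-shard stats for the same long field; all values are illustrative.
FieldStats.Long shard1 = new FieldStats.Long(100, 80, 300, 500, true, true, 1L, 50L);
FieldStats.Long shard2 = new FieldStats.Long(200, 150, 700, 900, true, true, 5L, 75L);

shard1.accumulate(shard2); // maxDoc 300, docCount 230, min 1, max 75

// Does the merged maximum satisfy "max_value gte 60"?
IndexConstraint constraint = new IndexConstraint(
    "answer_count", IndexConstraint.Property.MAX, IndexConstraint.Comparison.GTE, "60");
boolean matches = shard1.match(constraint); // true, since compare(75, 60) >= 0
-------------------------------------------------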


@@ -1,207 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.fieldstats;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ValidateActions;
import org.elasticsearch.action.support.broadcast.BroadcastRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
public class FieldStatsRequest extends BroadcastRequest<FieldStatsRequest> {
public static final String DEFAULT_LEVEL = "cluster";
private String[] fields = Strings.EMPTY_ARRAY;
private String level = DEFAULT_LEVEL;
private IndexConstraint[] indexConstraints = new IndexConstraint[0];
private boolean useCache = true;
public String[] getFields() {
return fields;
}
public void setFields(String[] fields) {
if (fields == null) {
throw new NullPointerException("specified fields can't be null");
}
this.fields = fields;
}
public void setUseCache(boolean useCache) {
this.useCache = useCache;
}
public boolean shouldUseCache() {
return useCache;
}
public IndexConstraint[] getIndexConstraints() {
return indexConstraints;
}
public void setIndexConstraints(IndexConstraint[] indexConstraints) {
if (indexConstraints == null) {
throw new NullPointerException("specified index_constraints can't be null");
}
this.indexConstraints = indexConstraints;
}
public void source(XContentParser parser) throws IOException {
List<IndexConstraint> indexConstraints = new ArrayList<>();
List<String> fields = new ArrayList<>();
String fieldName = null;
Token token = parser.nextToken();
assert token == Token.START_OBJECT;
for (token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) {
switch (token) {
case FIELD_NAME:
fieldName = parser.currentName();
break;
case START_OBJECT:
if ("index_constraints".equals(fieldName)) {
parseIndexConstraints(indexConstraints, parser);
} else {
throw new IllegalArgumentException("unknown field [" + fieldName + "]");
}
break;
case START_ARRAY:
if ("fields".equals(fieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token.isValue()) {
fields.add(parser.text());
} else {
throw new IllegalArgumentException("unexpected token [" + token + "]");
}
}
} else {
throw new IllegalArgumentException("unknown field [" + fieldName + "]");
}
break;
default:
throw new IllegalArgumentException("unexpected token [" + token + "]");
}
}
this.fields = fields.toArray(new String[fields.size()]);
this.indexConstraints = indexConstraints.toArray(new IndexConstraint[indexConstraints.size()]);
}
private static void parseIndexConstraints(List<IndexConstraint> indexConstraints,
XContentParser parser) throws IOException {
Token token = parser.currentToken();
assert token == Token.START_OBJECT;
String field = null;
String currentName = null;
for (token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) {
if (token == Token.FIELD_NAME) {
field = currentName = parser.currentName();
} else if (token == Token.START_OBJECT) {
for (Token fieldToken = parser.nextToken();
fieldToken != Token.END_OBJECT; fieldToken = parser.nextToken()) {
if (fieldToken == Token.FIELD_NAME) {
currentName = parser.currentName();
} else if (fieldToken == Token.START_OBJECT) {
IndexConstraint.Property property = IndexConstraint.Property.parse(currentName);
String value = null;
String optionalFormat = null;
IndexConstraint.Comparison comparison = null;
for (Token propertyToken = parser.nextToken();
propertyToken != Token.END_OBJECT; propertyToken = parser.nextToken()) {
if (propertyToken.isValue()) {
if ("format".equals(parser.currentName())) {
optionalFormat = parser.text();
} else {
comparison = IndexConstraint.Comparison.parse(parser.currentName());
value = parser.text();
}
} else {
if (propertyToken != Token.FIELD_NAME) {
throw new IllegalArgumentException("unexpected token [" + propertyToken + "]");
}
}
}
indexConstraints.add(new IndexConstraint(field, property, comparison, value, optionalFormat));
} else {
throw new IllegalArgumentException("unexpected token [" + fieldToken + "]");
}
}
} else {
throw new IllegalArgumentException("unexpected token [" + token + "]");
}
}
}
public String level() {
return level;
}
public void level(String level) {
this.level = level;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = super.validate();
if ("cluster".equals(level) == false && "indices".equals(level) == false) {
validationException =
ValidateActions.addValidationError("invalid level option [" + level + "]", validationException);
}
if (fields == null || fields.length == 0) {
validationException = ValidateActions.addValidationError("no fields specified", validationException);
}
return validationException;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
fields = in.readStringArray();
int size = in.readVInt();
indexConstraints = new IndexConstraint[size];
for (int i = 0; i < size; i++) {
indexConstraints[i] = new IndexConstraint(in);
}
level = in.readString();
useCache = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArrayNullable(fields);
out.writeVInt(indexConstraints.length);
for (IndexConstraint indexConstraint : indexConstraints) {
out.writeString(indexConstraint.getField());
out.writeByte(indexConstraint.getProperty().getId());
out.writeByte(indexConstraint.getComparison().getId());
out.writeString(indexConstraint.getValue());
out.writeOptionalString(indexConstraint.getOptionalFormat());
}
out.writeString(level);
out.writeBoolean(useCache);
}
}
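
For reference, a hedged sketch of the programmatic equivalent of the JSON body that
source() parses; the field name and dates are invented for illustration:

-------------------------------------------------
// Equivalent of the body: {"fields": ["creation_date"], "index_constraints":
//   {"creation_date": {"min_value": {"gte": "2017-01-01", "format": "yyyy-MM-dd"}}}}
FieldStatsRequest request = new FieldStatsRequest();
request.setFields(new String[] {"creation_date"});
request.setIndexConstraints(new IndexConstraint[] {
    new IndexConstraint("creation_date", IndexConstraint.Property.MIN,
        IndexConstraint.Comparison.GTE, "2017-01-01", "yyyy-MM-dd")
});
request.level("indices"); // "cluster" (the default) or "indices"
-------------------------------------------------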


@@ -1,51 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.fieldstats;
import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
public class FieldStatsRequestBuilder extends
BroadcastOperationRequestBuilder<FieldStatsRequest, FieldStatsResponse, FieldStatsRequestBuilder> {
public FieldStatsRequestBuilder(ElasticsearchClient client, FieldStatsAction action) {
super(client, action, new FieldStatsRequest());
}
public FieldStatsRequestBuilder setFields(String... fields) {
request().setFields(fields);
return this;
}
public FieldStatsRequestBuilder setIndexContraints(IndexConstraint... fields) {
request().setIndexConstraints(fields);
return this;
}
public FieldStatsRequestBuilder setLevel(String level) {
request().level(level);
return this;
}
public FieldStatsRequestBuilder setUseCache(boolean useCache) {
request().setUseCache(useCache);
return this;
}
}
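
A usage sketch for the builder, assuming the conventional INSTANCE singleton on
FieldStatsAction and the get() helper inherited from the builder base class (neither
appears in this diff), given an ElasticsearchClient named client:

-------------------------------------------------
FieldStatsResponse response = new FieldStatsRequestBuilder(client, FieldStatsAction.INSTANCE)
    .setFields("creation_date")
    .setLevel("indices")
    .setUseCache(true)
    .get();
-------------------------------------------------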


@@ -1,118 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.fieldstats;
import org.elasticsearch.Version;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class FieldStatsResponse extends BroadcastResponse {
private Map<String, Map<String, FieldStats>> indicesMergedFieldStats;
private Map<String, String> conflicts;
public FieldStatsResponse() {
}
public FieldStatsResponse(int totalShards, int successfulShards, int failedShards,
List<ShardOperationFailedException> shardFailures,
Map<String, Map<String, FieldStats>> indicesMergedFieldStats,
Map<String, String> conflicts) {
super(totalShards, successfulShards, failedShards, shardFailures);
this.indicesMergedFieldStats = indicesMergedFieldStats;
this.conflicts = conflicts;
}
@Nullable
public Map<String, FieldStats> getAllFieldStats() {
return indicesMergedFieldStats.get("_all");
}
public Map<String, String> getConflicts() {
return conflicts;
}
public Map<String, Map<String, FieldStats>> getIndicesMergedFieldStats() {
return indicesMergedFieldStats;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
indicesMergedFieldStats = new HashMap<>(size);
for (int i = 0; i < size; i++) {
String key = in.readString();
int indexSize = in.readVInt();
Map<String, FieldStats> indexFieldStats = new HashMap<>(indexSize);
indicesMergedFieldStats.put(key, indexFieldStats);
for (int j = 0; j < indexSize; j++) {
key = in.readString();
FieldStats value = FieldStats.readFrom(in);
indexFieldStats.put(key, value);
}
}
size = in.readVInt();
conflicts = new HashMap<>(size);
for (int i = 0; i < size; i++) {
String key = in.readString();
String value = in.readString();
conflicts.put(key, value);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(indicesMergedFieldStats.size());
for (Map.Entry<String, Map<String, FieldStats>> entry1 : indicesMergedFieldStats.entrySet()) {
out.writeString(entry1.getKey());
int size = entry1.getValue().size();
if (out.getVersion().before(Version.V_5_2_0)) {
// filter fieldstats without min/max information
for (FieldStats stats : entry1.getValue().values()) {
if (stats.hasMinMax() == false) {
size--;
}
}
}
out.writeVInt(size);
for (Map.Entry<String, FieldStats> entry2 : entry1.getValue().entrySet()) {
if (entry2.getValue().hasMinMax() || out.getVersion().onOrAfter(Version.V_5_2_0)) {
out.writeString(entry2.getKey());
entry2.getValue().writeTo(out);
}
}
}
out.writeVInt(conflicts.size());
for (Map.Entry<String, String> entry : conflicts.entrySet()) {
out.writeString(entry.getKey());
out.writeString(entry.getValue());
}
}
}
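
Reading the merged result back is then a map lookup; a short sketch, assuming a response
built at the default "cluster" level so that the "_all" bucket is populated:

-------------------------------------------------
Map<String, FieldStats> all = response.getAllFieldStats(); // @Nullable "_all" bucket
FieldStats creationDate = all.get("creation_date");
if (creationDate != null && creationDate.hasMinMax()) {
    String min = creationDate.getMinValueAsString();
    String max = creationDate.getMaxValueAsString();
}
-------------------------------------------------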


@@ -1,72 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.fieldstats;
import org.elasticsearch.action.support.broadcast.BroadcastShardRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.shard.ShardId;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
public class FieldStatsShardRequest extends BroadcastShardRequest {
private String[] fields;
private boolean useCache;
public FieldStatsShardRequest() {
}
public FieldStatsShardRequest(ShardId shardId, FieldStatsRequest request) {
super(shardId, request);
Set<String> fields = new HashSet<>(Arrays.asList(request.getFields()));
for (IndexConstraint indexConstraint : request.getIndexConstraints()) {
fields.add(indexConstraint.getField());
}
this.fields = fields.toArray(new String[fields.size()]);
useCache = request.shouldUseCache();
}
public String[] getFields() {
return fields;
}
public boolean shouldUseCache() {
return useCache;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
fields = in.readStringArray();
useCache = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArrayNullable(fields);
out.writeBoolean(useCache);
}
}


@@ -1,85 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.fieldstats;
import org.elasticsearch.Version;
import org.elasticsearch.action.support.broadcast.BroadcastShardResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.shard.ShardId;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Collectors;
public class FieldStatsShardResponse extends BroadcastShardResponse {
private Map<String, FieldStats<?>> fieldStats;
public FieldStatsShardResponse() {
}
public FieldStatsShardResponse(ShardId shardId, Map<String, FieldStats<?>> fieldStats) {
super(shardId);
this.fieldStats = fieldStats;
}
public Map<String, FieldStats<?>> getFieldStats() {
return fieldStats;
}
Map<String, FieldStats<?>> filterNullMinMax() {
return fieldStats.entrySet().stream()
.filter((e) -> e.getValue().hasMinMax())
.collect(Collectors.toMap(p -> p.getKey(), p -> p.getValue()));
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
final int size = in.readVInt();
fieldStats = new HashMap<>(size);
for (int i = 0; i < size; i++) {
String key = in.readString();
FieldStats value = FieldStats.readFrom(in);
fieldStats.put(key, value);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
final Map<String, FieldStats<?>> stats;
if (out.getVersion().before(Version.V_5_2_0)) {
// FieldStats with null min/max are not (de)serializable in versions prior to Version.V_5_2_0
stats = filterNullMinMax();
} else {
stats = getFieldStats();
}
out.writeVInt(stats.size());
for (Map.Entry<String, FieldStats<?>> entry : stats.entrySet()) {
out.writeString(entry.getKey());
entry.getValue().writeTo(out);
}
}
}


@@ -1,183 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.fieldstats;
import org.elasticsearch.common.io.stream.StreamInput;
import java.io.IOException;
import java.util.Locale;
import java.util.Objects;
public class IndexConstraint {
private final String field;
private final Property property;
private final Comparison comparison;
private final String value;
private final String optionalFormat;
IndexConstraint(StreamInput input) throws IOException {
this.field = input.readString();
this.property = Property.read(input.readByte());
this.comparison = Comparison.read(input.readByte());
this.value = input.readString();
this.optionalFormat = input.readOptionalString();
}
public IndexConstraint(String field, Property property, Comparison comparison, String value) {
this(field, property, comparison, value, null);
}
public IndexConstraint(String field, Property property,
Comparison comparison, String value, String optionalFormat) {
this.field = Objects.requireNonNull(field);
this.property = Objects.requireNonNull(property);
this.comparison = Objects.requireNonNull(comparison);
this.value = Objects.requireNonNull(value);
this.optionalFormat = optionalFormat;
}
/**
* @return The field that the constraint applies to
*/
public String getField() {
return field;
}
/**
* @return How to compare the specified value against the field property (lt, lte, gt and gte)
*/
public Comparison getComparison() {
return comparison;
}
/**
* @return The property of the field (min or max value) that the constraint applies to
*/
public Property getProperty() {
return property;
}
/**
* @return The value to compare against
*/
public String getValue() {
return value;
}
/**
* @return An optional format that specifies how the value string is converted into the native value of the field.
* Not all field types support this; right now only the date field supports this option.
*/
public String getOptionalFormat() {
return optionalFormat;
}
public enum Property {
MIN((byte) 0),
MAX((byte) 1);
private final byte id;
Property(byte id) {
this.id = id;
}
public byte getId() {
return id;
}
public static Property read(byte id) {
switch (id) {
case 0:
return MIN;
case 1:
return MAX;
default:
throw new IllegalArgumentException("Unknown property [" + id + "]");
}
}
public static Property parse(String value) {
value = value.toLowerCase(Locale.ROOT);
switch (value) {
case "min_value":
return MIN;
case "max_value":
return MAX;
default:
throw new IllegalArgumentException("Unknown property [" + value + "]");
}
}
}
public enum Comparison {
LT((byte) 0),
LTE((byte) 1),
GT((byte) 2),
GTE((byte) 3);
private final byte id;
Comparison(byte id) {
this.id = id;
}
public byte getId() {
return id;
}
public static Comparison read(byte id) {
switch (id) {
case 0:
return LT;
case 1:
return LTE;
case 2:
return GT;
case 3:
return GTE;
default:
throw new IllegalArgumentException("Unknown comparison [" + id + "]");
}
}
public static Comparison parse(String value) {
value = value.toLowerCase(Locale.ROOT);
switch (value) {
case "lt":
return LT;
case "lte":
return LTE;
case "gt":
return GT;
case "gte":
return GTE;
default:
throw new IllegalArgumentException("Unknown comparison [" + value + "]");
}
}
}
}
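
Both parse helpers lower-case their input with Locale.ROOT before matching, so any
casing is accepted; for example:

-------------------------------------------------
IndexConstraint.Property property = IndexConstraint.Property.parse("MAX_VALUE"); // Property.MAX
IndexConstraint.Comparison comparison = IndexConstraint.Comparison.parse("gte"); // Comparison.GTE
-------------------------------------------------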


@@ -1,223 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.fieldstats;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.TransportBroadcastAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReferenceArray;
public class TransportFieldStatsAction extends
TransportBroadcastAction<FieldStatsRequest, FieldStatsResponse, FieldStatsShardRequest, FieldStatsShardResponse> {
private final IndicesService indicesService;
@Inject
public TransportFieldStatsAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
IndicesService indicesService) {
super(settings, FieldStatsAction.NAME, threadPool, clusterService, transportService,
actionFilters, indexNameExpressionResolver, FieldStatsRequest::new,
FieldStatsShardRequest::new, ThreadPool.Names.MANAGEMENT);
this.indicesService = indicesService;
}
@Override
protected FieldStatsResponse newResponse(FieldStatsRequest request, AtomicReferenceArray shardsResponses,
ClusterState clusterState) {
int successfulShards = 0;
int failedShards = 0;
Map<String, String> conflicts = new HashMap<>();
Map<String, Map<String, FieldStats>> indicesMergedFieldStats = new HashMap<>();
List<ShardOperationFailedException> shardFailures = new ArrayList<>();
for (int i = 0; i < shardsResponses.length(); i++) {
Object shardValue = shardsResponses.get(i);
if (shardValue == null) {
// simply ignore non-active shards
} else if (shardValue instanceof BroadcastShardOperationFailedException) {
failedShards++;
shardFailures.add(
new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardValue)
);
} else {
successfulShards++;
FieldStatsShardResponse shardResponse = (FieldStatsShardResponse) shardValue;
final String indexName;
if ("cluster".equals(request.level())) {
indexName = "_all";
} else if ("indices".equals(request.level())) {
indexName = shardResponse.getIndex();
} else {
// should already have been caught by the FieldStatsRequest#validate(...)
throw new IllegalArgumentException("Illegal level option [" + request.level() + "]");
}
Map<String, FieldStats> indexMergedFieldStats = indicesMergedFieldStats.get(indexName);
if (indexMergedFieldStats == null) {
indicesMergedFieldStats.put(indexName, indexMergedFieldStats = new HashMap<>());
}
Map<String, FieldStats<?>> fieldStats = shardResponse.getFieldStats();
for (Map.Entry<String, FieldStats<?>> entry : fieldStats.entrySet()) {
FieldStats<?> existing = indexMergedFieldStats.get(entry.getKey());
if (existing != null) {
if (existing.getType() != entry.getValue().getType()) {
if (conflicts.containsKey(entry.getKey()) == false) {
FieldStats[] fields = new FieldStats[] {entry.getValue(), existing};
Arrays.sort(fields, (o1, o2) -> Byte.compare(o1.getType(), o2.getType()));
conflicts.put(entry.getKey(),
"Field [" + entry.getKey() + "] of type [" +
fields[0].getDisplayType() +
"] conflicts with existing field of type [" +
fields[1].getDisplayType() +
"] in other index.");
}
} else {
existing.accumulate(entry.getValue());
}
} else {
indexMergedFieldStats.put(entry.getKey(), entry.getValue());
}
}
}
// Check the fields with conflicts and remove them.
for (String conflictKey : conflicts.keySet()) {
Iterator<Map.Entry<String, Map<String, FieldStats>>> iterator =
indicesMergedFieldStats.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<String, Map<String, FieldStats>> entry = iterator.next();
if (entry.getValue().containsKey(conflictKey)) {
entry.getValue().remove(conflictKey);
}
}
}
}
if (request.getIndexConstraints().length != 0) {
Set<String> fieldStatFields = new HashSet<>(Arrays.asList(request.getFields()));
for (IndexConstraint indexConstraint : request.getIndexConstraints()) {
Iterator<Map.Entry<String, Map<String, FieldStats>>> iterator =
indicesMergedFieldStats.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<String, Map<String, FieldStats>> entry = iterator.next();
FieldStats indexConstraintFieldStats = entry.getValue().get(indexConstraint.getField());
if (indexConstraintFieldStats != null && indexConstraintFieldStats.match(indexConstraint)) {
// If the field stats didn't occur in the list of fields in the original request
// we need to remove the field stats, because it was never requested and was only needed to
// validate the index constraint.
if (fieldStatFields.contains(indexConstraint.getField()) == false) {
entry.getValue().remove(indexConstraint.getField());
}
} else {
// The index constraint didn't match or was empty,
// so we remove all the field stats of the index we're checking.
iterator.remove();
}
}
}
}
return new FieldStatsResponse(shardsResponses.length(), successfulShards, failedShards,
shardFailures, indicesMergedFieldStats, conflicts);
}
@Override
protected FieldStatsShardRequest newShardRequest(int numShards, ShardRouting shard, FieldStatsRequest request) {
return new FieldStatsShardRequest(shard.shardId(), request);
}
@Override
protected FieldStatsShardResponse newShardResponse() {
return new FieldStatsShardResponse();
}
@Override
protected FieldStatsShardResponse shardOperation(FieldStatsShardRequest request) {
ShardId shardId = request.shardId();
Map<String, FieldStats<?>> fieldStats = new HashMap<>();
IndexService indexServices = indicesService.indexServiceSafe(shardId.getIndex());
IndexShard shard = indexServices.getShard(shardId.id());
try (Engine.Searcher searcher = shard.acquireSearcher("fieldstats")) {
// Resolve patterns and deduplicate
Set<String> fieldNames = new HashSet<>();
for (String field : request.getFields()) {
fieldNames.addAll(shard.mapperService().simpleMatchToIndexNames(field));
}
for (String field : fieldNames) {
FieldStats<?> stats = indicesService.getFieldStats(shard, searcher, field, request.shouldUseCache());
if (stats != null) {
fieldStats.put(field, stats);
}
}
} catch (Exception e) {
throw ExceptionsHelper.convertToElastic(e);
}
return new FieldStatsShardResponse(shardId, fieldStats);
}
@Override
protected GroupShardsIterator shards(ClusterState clusterState, FieldStatsRequest request,
String[] concreteIndices) {
return clusterService.operationRouting().searchShards(clusterState, concreteIndices, null, null);
}
@Override
protected ClusterBlockException checkGlobalBlock(ClusterState state, FieldStatsRequest request) {
return state.blocks().globalBlockedException(ClusterBlockLevel.READ);
}
@Override
protected ClusterBlockException checkRequestBlock(ClusterState state, FieldStatsRequest request,
String[] concreteIndices) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, concreteIndices);
}
}
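
When the same field name maps to different types across indices, newResponse drops the
field from the merged stats and records it in the conflicts map; an illustrative entry
(field name invented), following the message format built above:

-------------------------------------------------
// Types are sorted by type id, so the lower id ("integer") is reported first:
// conflicts.get("rating") ->
//   "Field [rating] of type [integer] conflicts with existing field of type [float] in other index."
-------------------------------------------------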


@@ -68,7 +68,7 @@ public class TransportMultiGetAction extends HandledTransportAction<MultiGetRequ
try {
concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, item).getName();
item.routing(clusterState.metaData().resolveIndexRouting(item.parent(), item.routing(), concreteSingleIndex));
item.routing(clusterState.metaData().resolveIndexRouting(item.parent(), item.routing(), item.index()));
if ((item.routing() == null) && (clusterState.getMetaData().routingRequired(concreteSingleIndex, item.type()))) {
String message = "routing is required for [" + concreteSingleIndex + "]/[" + item.type() + "]/[" + item.id() + "]";
responses.set(i, newItemFailure(concreteSingleIndex, item.type(), item.id(), new IllegalArgumentException(message)));


@@ -43,6 +43,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.shard.ShardId;
@@ -484,7 +485,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
}
public void process(@Nullable MappingMetaData mappingMd, String concreteIndex) {
public void process(Version indexCreatedVersion, @Nullable MappingMetaData mappingMd, String concreteIndex) {
if (mappingMd != null) {
// might as well check for routing here
if (mappingMd.routing().required() && routing == null) {
@@ -508,7 +509,13 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
if (id == null) {
assert autoGeneratedTimestamp == -1 : "timestamp has already been generated!";
autoGeneratedTimestamp = Math.max(0, System.currentTimeMillis()); // extra paranoia
id(UUIDs.base64UUID());
String uid;
if (indexCreatedVersion.onOrAfter(Version.V_6_0_0_beta1)) {
uid = UUIDs.base64UUID();
} else {
uid = UUIDs.legacyBase64UUID();
}
id(uid);
}
}


@@ -21,7 +21,6 @@ package org.elasticsearch.action.index;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;
@@ -38,8 +37,6 @@ import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpect
*/
public class IndexResponse extends DocWriteResponse {
private static final String CREATED = "created";
public IndexResponse() {
}
@@ -67,13 +64,6 @@ public class IndexResponse extends DocWriteResponse {
return builder.append("]").toString();
}
@Override
public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
super.innerToXContent(builder, params);
builder.field(CREATED, result == Result.CREATED);
return builder;
}
public static IndexResponse fromXContent(XContentParser parser) throws IOException {
ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
@@ -88,16 +78,7 @@ public class IndexResponse extends DocWriteResponse {
* Parse the current token and update the parsing context appropriately.
*/
public static void parseXContentFields(XContentParser parser, Builder context) throws IOException {
XContentParser.Token token = parser.currentToken();
String currentFieldName = parser.currentName();
if (CREATED.equals(currentFieldName)) {
if (token.isValue()) {
context.setCreated(parser.booleanValue());
}
} else {
DocWriteResponse.parseInnerToXContent(parser, context);
}
DocWriteResponse.parseInnerToXContent(parser, context);
}
/**
@@ -107,15 +88,10 @@
*/
public static class Builder extends DocWriteResponse.Builder {
private boolean created = false;
public void setCreated(boolean created) {
this.created = created;
}
@Override
public IndexResponse build() {
IndexResponse indexResponse = new IndexResponse(shardId, type, id, seqNo, primaryTerm, version, created);
IndexResponse indexResponse = new IndexResponse(shardId, type, id, seqNo, primaryTerm, version,
result == Result.CREATED ? true : false);
indexResponse.setForcedRefresh(forcedRefresh);
if (shardInfo != null) {
indexResponse.setShardInfo(shardInfo);


@@ -162,18 +162,18 @@ public class SimulatePipelineRequest extends ActionRequest {
if (pipeline == null) {
throw new IllegalArgumentException("pipeline [" + pipelineId + "] does not exist");
}
List<IngestDocument> ingestDocumentList = parseDocs(config, pipelineStore.isNewIngestDateFormat());
List<IngestDocument> ingestDocumentList = parseDocs(config);
return new Parsed(pipeline, ingestDocumentList, verbose);
}
static Parsed parse(Map<String, Object> config, boolean verbose, PipelineStore pipelineStore) throws Exception {
Map<String, Object> pipelineConfig = ConfigurationUtils.readMap(null, null, config, Fields.PIPELINE);
Pipeline pipeline = PIPELINE_FACTORY.create(SIMULATED_PIPELINE_ID, pipelineConfig, pipelineStore.getProcessorFactories());
List<IngestDocument> ingestDocumentList = parseDocs(config, pipelineStore.isNewIngestDateFormat());
List<IngestDocument> ingestDocumentList = parseDocs(config);
return new Parsed(pipeline, ingestDocumentList, verbose);
}
private static List<IngestDocument> parseDocs(Map<String, Object> config, boolean newDateFormat) {
private static List<IngestDocument> parseDocs(Map<String, Object> config) {
List<Map<String, Object>> docs = ConfigurationUtils.readList(null, null, config, Fields.DOCS);
List<IngestDocument> ingestDocumentList = new ArrayList<>();
for (Map<String, Object> dataMap : docs) {
@@ -183,7 +183,7 @@ public class SimulatePipelineRequest extends ActionRequest {
ConfigurationUtils.readStringProperty(null, null, dataMap, MetaData.ID.getFieldName(), "_id"),
ConfigurationUtils.readOptionalStringProperty(null, null, dataMap, MetaData.ROUTING.getFieldName()),
ConfigurationUtils.readOptionalStringProperty(null, null, dataMap, MetaData.PARENT.getFieldName()),
document, newDateFormat);
document);
ingestDocumentList.add(ingestDocument);
}
return ingestDocumentList;


@@ -19,6 +19,7 @@
package org.elasticsearch.action.ingest;
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
@@ -27,6 +28,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.ingest.IngestDocument;
import java.io.IOException;
import java.time.ZoneId;
import java.util.Date;
import java.util.Map;
import java.util.Objects;
@@ -42,6 +45,12 @@ final class WriteableIngestDocument implements Writeable, ToXContent {
WriteableIngestDocument(StreamInput in) throws IOException {
Map<String, Object> sourceAndMetadata = in.readMap();
Map<String, Object> ingestMetadata = in.readMap();
if (in.getVersion().before(Version.V_6_0_0_beta1)) {
ingestMetadata.computeIfPresent("timestamp", (k, o) -> {
Date date = (Date) o;
return date.toInstant().atZone(ZoneId.systemDefault());
});
}
this.ingestDocument = new IngestDocument(sourceAndMetadata, ingestMetadata);
}


@@ -93,7 +93,7 @@ public class TransportResyncReplicationAction extends TransportWriteAction<Resyn
if (node.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
super.sendReplicaRequest(replicaRequest, node, listener);
} else {
listener.onResponse(new ReplicaResponse(replicaRequest.getTargetAllocationID(), SequenceNumbersService.UNASSIGNED_SEQ_NO));
listener.onResponse(new ReplicaResponse(SequenceNumbersService.UNASSIGNED_SEQ_NO));
}
}


@@ -66,6 +66,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
private final SetOnce<AtomicArray<ShardSearchFailure>> shardFailures = new SetOnce<>();
private final Object shardFailuresMutex = new Object();
private final AtomicInteger successfulOps = new AtomicInteger();
private final AtomicInteger skippedOps = new AtomicInteger();
private final TransportSearchAction.SearchTimeProvider timeProvider;
@@ -106,7 +107,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
if (getNumShards() == 0) {
//no search shards to search on, bail with empty response
//(it happens with search across _all with no indices around and consistent with broadcast operations)
listener.onResponse(new SearchResponse(InternalSearchResponse.empty(), null, 0, 0, buildTookInMillis(),
listener.onResponse(new SearchResponse(InternalSearchResponse.empty(), null, 0, 0, 0, buildTookInMillis(),
ShardSearchFailure.EMPTY_ARRAY));
return;
}
@@ -168,35 +169,35 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
public final void onShardFailure(final int shardIndex, @Nullable SearchShardTarget shardTarget, Exception e) {
// we don't aggregate shard failures on non-active shards (but we do keep the header counts right)
if (TransportActions.isShardNotAvailableException(e)) {
return;
}
AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures.get();
// lazily create shard failures, so in the common case (no failures) we can cheaply build the empty shard failure list
if (shardFailures == null) { // this is double checked locking but it's fine since SetOnce uses a volatile read internally
synchronized (shardFailuresMutex) {
shardFailures = this.shardFailures.get(); // read again, another thread may have created it in the meantime
if (shardFailures == null) { // still null so we are the first and create a new instance
shardFailures = new AtomicArray<>(getNumShards());
this.shardFailures.set(shardFailures);
if (TransportActions.isShardNotAvailableException(e) == false) {
AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures.get();
// lazily create shard failures, so in the common case (no failures) we can cheaply build the empty shard failure list
if (shardFailures == null) { // this is double checked locking but it's fine since SetOnce uses a volatile read internally
synchronized (shardFailuresMutex) {
shardFailures = this.shardFailures.get(); // read again, another thread may have created it in the meantime
if (shardFailures == null) { // still null so we are the first and create a new instance
shardFailures = new AtomicArray<>(getNumShards());
this.shardFailures.set(shardFailures);
}
}
}
}
ShardSearchFailure failure = shardFailures.get(shardIndex);
if (failure == null) {
shardFailures.set(shardIndex, new ShardSearchFailure(e, shardTarget));
} else {
// the failure is already present, try not to override it with an exception that is less meaningful
// for example, an illegal shard state
if (TransportActions.isReadOverrideException(e)) {
ShardSearchFailure failure = shardFailures.get(shardIndex);
if (failure == null) {
shardFailures.set(shardIndex, new ShardSearchFailure(e, shardTarget));
} else {
// the failure is already present, try not to override it with an exception that is less meaningful
// for example, an illegal shard state
if (TransportActions.isReadOverrideException(e)) {
shardFailures.set(shardIndex, new ShardSearchFailure(e, shardTarget));
}
}
if (results.hasResult(shardIndex)) {
assert failure == null : "shard failed before but shouldn't: " + failure;
successfulOps.decrementAndGet(); // if this shard was successful before (initial phase) we have to adjust the counter
}
}
if (results.hasResult(shardIndex)) {
assert failure == null : "shard failed before but shouldn't: " + failure;
successfulOps.decrementAndGet(); // if this shard was successful before (initial phase) we have to adjust the counter
}
results.consumeShardFailure(shardIndex);
}
/**
@ -263,7 +264,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
@Override
public final SearchResponse buildSearchResponse(InternalSearchResponse internalSearchResponse, String scrollId) {
return new SearchResponse(internalSearchResponse, scrollId, getNumShards(), successfulOps.get(),
buildTookInMillis(), buildShardFailures());
skippedOps.get(), buildTookInMillis(), buildShardFailures());
}
@Override
@ -297,11 +298,12 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
}
public final ShardSearchTransportRequest buildShardSearchRequest(SearchShardIterator shardIt) {
String clusterAlias = shardIt.getClusterAlias();
AliasFilter filter = aliasFilter.get(shardIt.shardId().getIndex().getUUID());
assert filter != null;
float indexBoost = concreteIndexBoosts.getOrDefault(shardIt.shardId().getIndex().getUUID(), DEFAULT_INDEX_BOOST);
return new ShardSearchTransportRequest(shardIt.getOriginalIndices(), request, shardIt.shardId(), getNumShards(),
filter, indexBoost, timeProvider.getAbsoluteStartMillis());
filter, indexBoost, timeProvider.getAbsoluteStartMillis(), clusterAlias);
}
/**
@ -311,4 +313,11 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
* @param context the search context for the next phase
*/
protected abstract SearchPhase getNextPhase(SearchPhaseResults<Result> results, SearchPhaseContext context);
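// a skipped shard is recorded as both skipped and successful, keeping the shard accounting in the final response consistent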
@Override
protected void skipShard(SearchShardIterator iterator) {
super.skipShard(iterator);
successfulOps.incrementAndGet();
skippedOps.incrementAndGet();
}
}

View File

@ -0,0 +1,143 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.search;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.FixedBitSet;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.transport.Transport;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executor;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.stream.Stream;
/**
 * This search phase can be used as an initial search phase to pre-filter search shards based on query rewriting.
 * The queries are rewritten against the shards, and based on the rewrite result shards may be excluded
 * from the search. The extra round trip to the search shards is very cheap and is not subject to rejections,
 * which allows fanning out to more shards at the same time without running into rejections, even when hitting a
 * large portion of the cluster's indices. (A small standalone sketch follows this class.)
 */
final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction<SearchTransportService.CanMatchResponse> {
private final Function<GroupShardsIterator<SearchShardIterator>, SearchPhase> phaseFactory;
private final GroupShardsIterator<SearchShardIterator> shardsIts;
CanMatchPreFilterSearchPhase(Logger logger, SearchTransportService searchTransportService,
BiFunction<String, String, Transport.Connection> nodeIdToConnection,
Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
Executor executor, SearchRequest request,
ActionListener<SearchResponse> listener, GroupShardsIterator<SearchShardIterator> shardsIts,
TransportSearchAction.SearchTimeProvider timeProvider, long clusterStateVersion,
SearchTask task, Function<GroupShardsIterator<SearchShardIterator>, SearchPhase> phaseFactory) {
super("can_match", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor, request,
listener,
shardsIts, timeProvider, clusterStateVersion, task, new BitSetSearchPhaseResults(shardsIts.size()));
this.phaseFactory = phaseFactory;
this.shardsIts = shardsIts;
}
@Override
protected void executePhaseOnShard(SearchShardIterator shardIt, ShardRouting shard,
SearchActionListener<SearchTransportService.CanMatchResponse> listener) {
getSearchTransport().sendCanMatch(getConnection(shardIt.getClusterAlias(), shard.currentNodeId()),
buildShardSearchRequest(shardIt), getTask(), listener);
}
@Override
protected SearchPhase getNextPhase(SearchPhaseResults<SearchTransportService.CanMatchResponse> results,
SearchPhaseContext context) {
return phaseFactory.apply(getIterator((BitSetSearchPhaseResults) results, shardsIts));
}
private GroupShardsIterator<SearchShardIterator> getIterator(BitSetSearchPhaseResults results,
GroupShardsIterator<SearchShardIterator> shardsIts) {
int cardinality = results.getNumPossibleMatches();
FixedBitSet possibleMatches = results.getPossibleMatches();
if (cardinality == 0) {
// this is a special case where we have no hits, but we need to get at least one search response in order
// to produce a valid search result with all the aggs etc.
possibleMatches.set(0);
}
int i = 0;
for (SearchShardIterator iter : shardsIts) {
if (possibleMatches.get(i++)) {
iter.reset();
} else {
iter.resetAndSkip();
}
}
return shardsIts;
}
private static final class BitSetSearchPhaseResults extends InitialSearchPhase.
SearchPhaseResults<SearchTransportService.CanMatchResponse> {
private final FixedBitSet possibleMatches;
private int numPossibleMatches;
BitSetSearchPhaseResults(int size) {
super(size);
possibleMatches = new FixedBitSet(size);
}
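// a response that can match and a shard failure are recorded the same way: both keep the shard in the search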
@Override
void consumeResult(SearchTransportService.CanMatchResponse result) {
if (result.canMatch()) {
consumeShardFailure(result.getShardIndex());
}
}
@Override
boolean hasResult(int shardIndex) {
return false; // unneeded
}
@Override
synchronized void consumeShardFailure(int shardIndex) {
// we have to carry over shard failures in order to account for them in the response.
possibleMatches.set(shardIndex);
numPossibleMatches++;
}
synchronized int getNumPossibleMatches() {
return numPossibleMatches;
}
synchronized FixedBitSet getPossibleMatches() {
return possibleMatches;
}
@Override
Stream<SearchTransportService.CanMatchResponse> getSuccessfulResults() {
return Stream.empty();
}
}
}
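To make the cardinality-zero fallback above concrete, here is a minimal standalone sketch using Lucene's FixedBitSet, the same class BitSetSearchPhaseResults relies on (the class name and printed message are invented for illustration):

-------------------------------------------------
import org.apache.lucene.util.FixedBitSet;

public class CanMatchSketch {
    public static void main(String[] args) {
        // three shards, none of which can match the rewritten query
        FixedBitSet possibleMatches = new FixedBitSet(3);
        if (possibleMatches.cardinality() == 0) {
            // force one shard so the final response still carries aggs, hit metadata etc.
            possibleMatches.set(0);
        }
        // shard 0 -> reset() and search it; shards 1 and 2 -> resetAndSkip(), reported as skipped
        System.out.println("shards to search: " + possibleMatches.cardinality());
    }
}
-------------------------------------------------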

View File

@ -41,16 +41,16 @@ import java.util.function.Function;
* @see CountedCollector#onFailure(int, SearchShardTarget, Exception)
*/
final class DfsQueryPhase extends SearchPhase {
private final InitialSearchPhase.SearchPhaseResults<SearchPhaseResult> queryResult;
private final InitialSearchPhase.ArraySearchPhaseResults<SearchPhaseResult> queryResult;
private final SearchPhaseController searchPhaseController;
private final AtomicArray<DfsSearchResult> dfsSearchResults;
private final Function<InitialSearchPhase.SearchPhaseResults<SearchPhaseResult>, SearchPhase> nextPhaseFactory;
private final Function<InitialSearchPhase.ArraySearchPhaseResults<SearchPhaseResult>, SearchPhase> nextPhaseFactory;
private final SearchPhaseContext context;
private final SearchTransportService searchTransportService;
DfsQueryPhase(AtomicArray<DfsSearchResult> dfsSearchResults,
SearchPhaseController searchPhaseController,
Function<InitialSearchPhase.SearchPhaseResults<SearchPhaseResult>, SearchPhase> nextPhaseFactory,
Function<InitialSearchPhase.ArraySearchPhaseResults<SearchPhaseResult>, SearchPhase> nextPhaseFactory,
SearchPhaseContext context) {
super("dfs_query");
this.queryResult = searchPhaseController.newSearchPhaseResults(context.getRequest(), context.getNumShards());

View File

@ -69,7 +69,7 @@ final class FetchSearchPhase extends SearchPhase {
}
this.fetchResults = new AtomicArray<>(resultConsumer.getNumShards());
this.searchPhaseController = searchPhaseController;
this.queryResults = resultConsumer.results;
this.queryResults = resultConsumer.getAtomicArray();
this.nextPhaseFactory = nextPhaseFactory;
this.context = context;
this.logger = context.getLogger();
@ -105,7 +105,8 @@ final class FetchSearchPhase extends SearchPhase {
-> moveToNextPhase(searchPhaseController, scrollId, reducedQueryPhase, queryAndFetchOptimization ?
queryResults : fetchResults);
if (queryAndFetchOptimization) {
assert phaseResults.isEmpty() || phaseResults.get(0).fetchResult() != null;
assert phaseResults.isEmpty() || phaseResults.get(0).fetchResult() != null : "phaseResults empty [" + phaseResults.isEmpty()
+ "], single result: " + phaseResults.get(0).fetchResult();
// query AND fetch optimization
finishPhase.run();
} else {

View File

@ -24,7 +24,6 @@ import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.util.concurrent.AtomicArray;
@ -50,6 +49,8 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
private final Logger logger;
private final int expectedTotalOps;
private final AtomicInteger totalOps = new AtomicInteger();
private final AtomicInteger shardExecutionIndex = new AtomicInteger(0);
private final int maxConcurrentShardRequests;
InitialSearchPhase(String name, SearchRequest request, GroupShardsIterator<SearchShardIterator> shardsIts, Logger logger) {
super(name);
@ -61,6 +62,7 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
// on a per-shard level we use shardIt.remaining() to increment the totalOps counter, but add 1 for the current shard result
// we process; hence we add one for the non-active partition here.
this.expectedTotalOps = shardsIts.totalSizeWith1ForEmpty();
maxConcurrentShardRequests = Math.min(request.getMaxConcurrentShardRequests(), shardsIts.size());
}
private void onShardFailure(final int shardIndex, @Nullable ShardRouting shard, @Nullable String nodeId,
@ -105,6 +107,7 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
onShardFailure(shardIndex, shard, shard.currentNodeId(), shardIt, inner);
}
} else {
maybeExecuteNext(); // move to the next execution if needed
// no more shards active, add a failure
if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception
if (e != null && !TransportActions.isShardNotAvailableException(e)) {
@ -124,23 +127,33 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
@Override
public final void run() throws IOException {
int shardIndex = -1;
for (final SearchShardIterator shardIt : shardsIts) {
shardIndex++;
final ShardRouting shard = shardIt.nextOrNull();
if (shard != null) {
performPhaseOnShard(shardIndex, shardIt, shard);
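// claim the first maxConcurrentShardRequests slots up front; maybeExecuteNext() hands out the remaining indices one at a time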
boolean success = shardExecutionIndex.compareAndSet(0, maxConcurrentShardRequests);
assert success;
for (int i = 0; i < maxConcurrentShardRequests; i++) {
SearchShardIterator shardRoutings = shardsIts.get(i);
if (shardRoutings.skip()) {
skipShard(shardRoutings);
} else {
// really, no shards active in this group
onShardFailure(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
performPhaseOnShard(i, shardRoutings, shardRoutings.nextOrNull());
}
}
}
private void maybeExecuteNext() {
final int index = shardExecutionIndex.getAndIncrement();
if (index < shardsIts.size()) {
SearchShardIterator shardRoutings = shardsIts.get(index);
if (shardRoutings.skip()) {
skipShard(shardRoutings);
} else {
performPhaseOnShard(index, shardRoutings, shardRoutings.nextOrNull());
}
}
}
private void performPhaseOnShard(final int shardIndex, final SearchShardIterator shardIt, final ShardRouting shard) {
if (shard == null) {
// TODO upgrade this to an assert...
// no more active shards... (we should not really get here, but just for safety)
onShardFailure(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
} else {
try {
@ -165,7 +178,7 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
}
}
private void onShardResult(FirstResult result, ShardIterator shardIt) {
private void onShardResult(FirstResult result, SearchShardIterator shardIt) {
assert result.getShardIndex() != -1 : "shard index is not set";
assert result.getSearchShardTarget() != null : "search shard target must not be null";
onShardSuccess(result);
@ -174,12 +187,24 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
// cause the successor to read a wrong value from successfulOps if the second phase is very fast, i.e. count etc.
// increment all the "future" shards to update the total ops, since some may work and some may not...
// and when that happens, we break on total ops, so we must maintain them
final int xTotalOps = totalOps.addAndGet(shardIt.remaining() + 1);
successfulShardExecution(shardIt);
}
private void successfulShardExecution(SearchShardIterator shardsIt) {
final int remainingOpsOnIterator;
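// a skipped iterator never executes a "current" shard, so only its remaining ops are consumed here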
if (shardsIt.skip()) {
remainingOpsOnIterator = shardsIt.remaining();
} else {
remainingOpsOnIterator = shardsIt.remaining() + 1;
}
final int xTotalOps = totalOps.addAndGet(remainingOpsOnIterator);
if (xTotalOps == expectedTotalOps) {
onPhaseDone();
} else if (xTotalOps > expectedTotalOps) {
throw new AssertionError("unexpected higher total ops [" + xTotalOps + "] compared to expected ["
+ expectedTotalOps + "]");
} else {
maybeExecuteNext();
}
}
@ -220,41 +245,39 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
/**
* This class acts as a basic result collection that can be extended to do on-the-fly reduction or result processing
*/
static class SearchPhaseResults<Result extends SearchPhaseResult> {
final AtomicArray<Result> results;
abstract static class SearchPhaseResults<Result extends SearchPhaseResult> {
private final int numShards;
SearchPhaseResults(int size) {
results = new AtomicArray<>(size);
protected SearchPhaseResults(int numShards) {
this.numShards = numShards;
}
/**
* Returns the number of expected results this class should collect
*/
final int getNumShards() {
return results.length();
return numShards;
}
/**
* A stream of all non-null (successful) shard results
*/
final Stream<Result> getSuccessfulResults() {
return results.asList().stream();
}
abstract Stream<Result> getSuccessfulResults();
/**
* Consumes a single shard result
* @param result the shards result
*/
void consumeResult(Result result) {
assert results.get(result.getShardIndex()) == null : "shardIndex: " + result.getShardIndex() + " is already set";
results.set(result.getShardIndex(), result);
}
abstract void consumeResult(Result result);
/**
* Returns <code>true</code> iff a result is present for the given shard ID.
*/
final boolean hasResult(int shardIndex) {
return results.get(shardIndex) != null;
abstract boolean hasResult(int shardIndex);
void consumeShardFailure(int shardIndex) {}
AtomicArray<Result> getAtomicArray() {
throw new UnsupportedOperationException();
}
/**
@ -264,4 +287,40 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
throw new UnsupportedOperationException("reduce is not supported");
}
}
/**
* This class acts as a basic result collection that can be extended to do on-the-fly reduction or result processing
*/
static class ArraySearchPhaseResults<Result extends SearchPhaseResult> extends SearchPhaseResults<Result> {
final AtomicArray<Result> results;
ArraySearchPhaseResults(int size) {
super(size);
this.results = new AtomicArray<>(size);
}
Stream<Result> getSuccessfulResults() {
return results.asList().stream();
}
void consumeResult(Result result) {
assert results.get(result.getShardIndex()) == null : "shardIndex: " + result.getShardIndex() + " is already set";
results.set(result.getShardIndex(), result);
}
boolean hasResult(int shardIndex) {
return results.get(shardIndex) != null;
}
@Override
AtomicArray<Result> getAtomicArray() {
return results;
}
}
protected void skipShard(SearchShardIterator iterator) {
assert iterator.skip();
successfulShardExecution(iterator);
}
}
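For illustration only, a hypothetical collector satisfying the new abstract contract without retaining per-shard objects could look like the sketch below (the class name is invented; it mirrors the pattern of BitSetSearchPhaseResults above, and assumes the surrounding file's AtomicInteger and Stream imports):

-------------------------------------------------
// hypothetical subclass for illustration: counts successes without retaining any results
static final class CountingPhaseResults<R extends SearchPhaseResult> extends SearchPhaseResults<R> {
    private final AtomicInteger consumed = new AtomicInteger();

    CountingPhaseResults(int numShards) {
        super(numShards);
    }

    @Override
    void consumeResult(R result) {
        consumed.incrementAndGet(); // drop the result, keep only the count
    }

    @Override
    boolean hasResult(int shardIndex) {
        return false; // nothing is retained per shard
    }

    @Override
    Stream<R> getSuccessfulResults() {
        return Stream.empty(); // no results to stream back
    }
}
-------------------------------------------------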

View File

@ -42,7 +42,7 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction
final GroupShardsIterator<SearchShardIterator> shardsIts, final TransportSearchAction.SearchTimeProvider timeProvider,
final long clusterStateVersion, final SearchTask task) {
super("dfs", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor, request, listener,
shardsIts, timeProvider, clusterStateVersion, task, new SearchPhaseResults<>(shardsIts.size()));
shardsIts, timeProvider, clusterStateVersion, task, new ArraySearchPhaseResults<>(shardsIts.size()));
this.searchPhaseController = searchPhaseController;
}
@ -55,7 +55,7 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction
@Override
protected SearchPhase getNextPhase(final SearchPhaseResults<DfsSearchResult> results, final SearchPhaseContext context) {
return new DfsQueryPhase(results.results, searchPhaseController, (queryResults) ->
return new DfsQueryPhase(results.getAtomicArray(), searchPhaseController, (queryResults) ->
new FetchSearchPhase(queryResults, searchPhaseController, context), context);
}
}

View File

@ -21,6 +21,7 @@ package org.elasticsearch.action.search;
import com.carrotsearch.hppc.IntArrayList;
import com.carrotsearch.hppc.ObjectObjectHashMap;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.CollectionStatistics;
import org.apache.lucene.search.FieldDoc;
@ -329,9 +330,9 @@ public final class SearchPhaseController extends AbstractComponent {
}
FetchSearchResult fetchResult = searchResultProvider.fetchResult();
final int index = fetchResult.counterGetAndIncrement();
assert index < fetchResult.hits().internalHits().length : "not enough hits fetched. index [" + index + "] length: "
+ fetchResult.hits().internalHits().length;
SearchHit hit = fetchResult.hits().internalHits()[index];
assert index < fetchResult.hits().getHits().length : "not enough hits fetched. index [" + index + "] length: "
+ fetchResult.hits().getHits().length;
SearchHit hit = fetchResult.hits().getHits()[index];
CompletionSuggestion.Entry.Option suggestOption =
suggestionOptions.get(scoreDocIndex - currentOffset);
hit.score(shardDoc.score);
@ -381,9 +382,9 @@ public final class SearchPhaseController extends AbstractComponent {
}
FetchSearchResult fetchResult = fetchResultProvider.fetchResult();
final int index = fetchResult.counterGetAndIncrement();
assert index < fetchResult.hits().internalHits().length : "not enough hits fetched. index [" + index + "] length: "
+ fetchResult.hits().internalHits().length;
SearchHit searchHit = fetchResult.hits().internalHits()[index];
assert index < fetchResult.hits().getHits().length : "not enough hits fetched. index [" + index + "] length: "
+ fetchResult.hits().getHits().length;
SearchHit searchHit = fetchResult.hits().getHits()[index];
searchHit.score(shardDoc.score);
searchHit.shard(fetchResult.getSearchShardTarget());
if (sorted) {
@ -606,12 +607,12 @@ public final class SearchPhaseController extends AbstractComponent {
}
/**
* A {@link org.elasticsearch.action.search.InitialSearchPhase.SearchPhaseResults} implementation
* A {@link InitialSearchPhase.ArraySearchPhaseResults} implementation
* that incrementally reduces aggregation results as shard results are consumed.
* This implementation can be configured to batch up a certain number of results and only reduce them
* iff the buffer is exhausted.
*/
static final class QueryPhaseResultConsumer extends InitialSearchPhase.SearchPhaseResults<SearchPhaseResult> {
static final class QueryPhaseResultConsumer extends InitialSearchPhase.ArraySearchPhaseResults<SearchPhaseResult> {
private final InternalAggregations[] aggsBuffer;
private final TopDocs[] topDocsBuffer;
private final boolean hasAggs;
@ -713,9 +714,9 @@ public final class SearchPhaseController extends AbstractComponent {
}
/**
* Returns a new SearchPhaseResults instance. This might return an instance that reduces search responses incrementally.
* Returns a new ArraySearchPhaseResults instance. This might return an instance that reduces search responses incrementally.
*/
InitialSearchPhase.SearchPhaseResults<SearchPhaseResult> newSearchPhaseResults(SearchRequest request, int numShards) {
InitialSearchPhase.ArraySearchPhaseResults<SearchPhaseResult> newSearchPhaseResults(SearchRequest request, int numShards) {
SearchSourceBuilder source = request.source();
boolean isScrollRequest = request.scroll() != null;
final boolean hasAggs = source != null && source.aggregations() != null;
@ -729,7 +730,7 @@ public final class SearchPhaseController extends AbstractComponent {
return new QueryPhaseResultConsumer(this, numShards, request.getBatchedReduceSize(), hasTopDocs, hasAggs);
}
}
return new InitialSearchPhase.SearchPhaseResults(numShards) {
return new InitialSearchPhase.ArraySearchPhaseResults(numShards) {
@Override
public ReducedQueryPhase reduce() {
return reducedQueryPhase(results.asList(), isScrollRequest, trackTotalHits);
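On the request side, the buffer size driving this incremental reduction is the existing batchedReduceSize knob; a brief sketch (the index pattern is made up, and this assumes the pre-existing setBatchedReduceSize setter on SearchRequest):

-------------------------------------------------
SearchRequest request = new SearchRequest("metrics-*"); // illustrative index pattern
// reduce aggs/top docs after every 32 shard results instead of holding all of them in memory
request.setBatchedReduceSize(32);
-------------------------------------------------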

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.search;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
@ -57,6 +58,8 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest
private static final ToXContent.Params FORMAT_PARAMS = new ToXContent.MapParams(Collections.singletonMap("pretty", "false"));
public static final int DEFAULT_PRE_FILTER_SHARD_SIZE = 128;
private SearchType searchType = SearchType.DEFAULT;
private String[] indices = Strings.EMPTY_ARRAY;
@ -74,6 +77,10 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest
private int batchedReduceSize = 512;
private int maxConcurrentShardRequests = 0;
private int preFilterShardSize = DEFAULT_PRE_FILTER_SHARD_SIZE;
private String[] types = Strings.EMPTY_ARRAY;
public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandOpenAndForbidClosed();
@ -302,6 +309,56 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest
return batchedReduceSize;
}
/**
* Returns the number of shard requests that should be executed concurrently. This value should be used as a protection mechanism to
* reduce the number of shard requests fired per high-level search request. Searches that hit the entire cluster can be throttled
* with this number to reduce the cluster load. The default grows with the number of nodes in the cluster but is at most <tt>256</tt>.
*/
public int getMaxConcurrentShardRequests() {
return maxConcurrentShardRequests == 0 ? 256 : maxConcurrentShardRequests;
}
/**
* Sets the number of shard requests that should be executed concurrently. This value should be used as a protection mechanism to
* reduce the number of shard requests fired per high-level search request. Searches that hit the entire cluster can be throttled
* with this number to reduce the cluster load. The default grows with the number of nodes in the cluster but is at most <tt>256</tt>.
*/
public void setMaxConcurrentShardRequests(int maxConcurrentShardRequests) {
if (maxConcurrentShardRequests < 1) {
throw new IllegalArgumentException("maxConcurrentShardRequests must be >= 1");
}
this.maxConcurrentShardRequests = maxConcurrentShardRequests;
}
/**
* Sets a threshold that enforces a pre-filter roundtrip to pre-filter search shards based on query rewriting if the number of shards
* the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for
* instance a shard cannot match any documents based on its rewrite method, i.e. if date filters are mandatory to match but the shard
* bounds and the query are disjoint. The default is <tt>128</tt>.
*/
public void setPreFilterShardSize(int preFilterShardSize) {
if (preFilterShardSize < 1) {
throw new IllegalArgumentException("preFilterShardSize must be >= 1");
}
this.preFilterShardSize = preFilterShardSize;
}
/**
* Returns a threshold that enforces a pre-filter roundtrip to pre-filter search shards based on query rewriting if the number of shards
* the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for
* instance a shard cannot match any documents based on its rewrite method, i.e. if date filters are mandatory to match but the shard
* bounds and the query are disjoint. The default is <tt>128</tt>.
*/
public int getPreFilterShardSize() {
return preFilterShardSize;
}
/**
* Returns <code>true</code> iff maxConcurrentShardRequests is set.
*/
boolean isMaxConcurrentShardRequestsSet() {
return maxConcurrentShardRequests != 0;
}
/**
* @return true if the request only has suggest
*/
@ -349,6 +406,10 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest
indicesOptions = IndicesOptions.readIndicesOptions(in);
requestCache = in.readOptionalBoolean();
batchedReduceSize = in.readVInt();
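// the new fan-out controls are only on the wire from 6.0.0-beta1 onwards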
if (in.getVersion().onOrAfter(Version.V_6_0_0_beta1)) {
maxConcurrentShardRequests = in.readVInt();
preFilterShardSize = in.readVInt();
}
}
@Override
@ -367,6 +428,10 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest
indicesOptions.writeIndicesOptions(out);
out.writeOptionalBoolean(requestCache);
out.writeVInt(batchedReduceSize);
if (out.getVersion().onOrAfter(Version.V_6_0_0_beta1)) {
out.writeVInt(maxConcurrentShardRequests);
out.writeVInt(preFilterShardSize);
}
}
@Override
@ -386,13 +451,16 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest
Objects.equals(requestCache, that.requestCache) &&
Objects.equals(scroll, that.scroll) &&
Arrays.equals(types, that.types) &&
Objects.equals(batchedReduceSize, that.batchedReduceSize) &&
Objects.equals(maxConcurrentShardRequests, that.maxConcurrentShardRequests) &&
Objects.equals(preFilterShardSize, that.preFilterShardSize) &&
Objects.equals(indicesOptions, that.indicesOptions);
}
@Override
public int hashCode() {
return Objects.hash(searchType, Arrays.hashCode(indices), routing, preference, source, requestCache,
scroll, Arrays.hashCode(types), indicesOptions);
scroll, Arrays.hashCode(types), indicesOptions, batchedReduceSize, maxConcurrentShardRequests, preFilterShardSize);
}
@Override
@ -406,6 +474,9 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest
", preference='" + preference + '\'' +
", requestCache=" + requestCache +
", scroll=" + scroll +
", maxConcurrentShardRequests=" + maxConcurrentShardRequests +
", batchedReduceSize=" + batchedReduceSize +
", preFilterShardSize=" + preFilterShardSize +
", source=" + source + '}';
}
}
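Putting the two new knobs together, a caller could throttle a broad search roughly as follows (a sketch; the index pattern and values are made up):

-------------------------------------------------
SearchRequest request = new SearchRequest("logs-*");
request.setMaxConcurrentShardRequests(32); // at most 32 shard-level requests in flight for this search
request.setPreFilterShardSize(64);         // run the can_match pre-filter once the request expands to more than 64 shards
-------------------------------------------------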

Some files were not shown because too many files have changed in this diff.