Merge branch 'master' into feature/aggs_2_0
Conflicts:
    src/main/java/org/elasticsearch/percolator/PercolatorService.java
    src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java
    src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java
    src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java
    src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java
    src/main/java/org/elasticsearch/search/aggregations/bucket/filters/InternalFilters.java
    src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java
    src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java
    src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java
    src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java
    src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java
    src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTermsAggregator.java
    src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java
    src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java
    src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java
    src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java
    src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java
    src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java
    src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java
    src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java
    src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinality.java
    src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java
    src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java
    src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java
    src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractInternalPercentiles.java
    src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java
    src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java
    src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java
    src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java
    src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java
    src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/InternalValueCount.java
commit 56a37620ed
@@ -22,8 +22,6 @@ $ curl -XGET 'http://localhost:9200/_cluster/health?pretty=true'
}
--------------------------------------------------

coming[1.5.0, number of pending tasks was added in 1.5.0]

The API can also be executed against one or more indices to get just the
specified indices health:
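For orientation, the per-index form of the health API mentioned above takes a comma-separated list of index names on the path; a minimal sketch (the index names `test1,test2` are placeholders):

[source,shell]
--------------------------------------------------
$ curl -XGET 'http://localhost:9200/_cluster/health/test1,test2?pretty=true'
--------------------------------------------------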
@@ -1,7 +1,7 @@
[[elasticsearch-reference]]
= Reference

:version: 1.5.0
:version: 1.5.1
:branch: 1.5
:jdk: 1.8.0_25
:defguide: https://www.elastic.co/guide/en/elasticsearch/guide/current
@@ -19,7 +19,6 @@ curl -XGET http://localhost:9200/_recovery?pretty&human
--------------------------------------------------

Response:
coming[1.5.0, this syntax was changed to fix inconsistencies with other APIs]
[source,js]
--------------------------------------------------
{
@@ -94,7 +93,6 @@ In some cases a higher level of detail may be preferable. Setting "detailed=true
curl -XGET http://localhost:9200/_recovery?pretty&human&detailed=true
--------------------------------------------------

coming[1.5.0, this syntax was changed to fix inconsistencies with other APIs]
Response:

[source,js]
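As a point of reference, the recovery API shown above can also be scoped to specific indices; a minimal sketch (the index names are placeholders):

[source,shell]
--------------------------------------------------
curl -XGET 'http://localhost:9200/index1,index2/_recovery?pretty&human'
--------------------------------------------------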
@@ -122,8 +122,6 @@ You can also set the default value to any date respecting <<mapping-timestamp-fi

If you don't provide any timestamp value, _timestamp will be set to this default value.

coming[1.5.0]

In elasticsearch 1.4, we allowed setting explicitly `"default":null` which is not possible anymore
as we added a new `ignore_missing` setting.
When reading an index created with elasticsearch 1.4 and using this, we automatically update it by
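For orientation, a `_timestamp` mapping that uses the `ignore_missing` setting referred to above might look like the following sketch (the type name and default date are illustrative):

[source,js]
--------------------------------------------------
{
    "tweet" : {
        "_timestamp" : {
            "enabled" : true,
            "default" : "1970-01-01",
            "ignore_missing" : false
        }
    }
}
--------------------------------------------------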
@@ -378,6 +378,9 @@ defaults to `true` or to the parent `object` type setting.

|`ignore_malformed` |Ignores a malformed number. Defaults to `false`.

|`numeric_resolution` |The unit to use when a numeric value is passed in. Possible
values include `seconds` and `milliseconds` (default).

|=======================================================================

[float]
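To make the `numeric_resolution` row concrete, a date field mapped to interpret numeric input as seconds could be declared roughly as follows (the type and field names are placeholders):

[source,js]
--------------------------------------------------
{
    "my_type" : {
        "properties" : {
            "created" : {
                "type" : "date",
                "numeric_resolution" : "seconds"
            }
        }
    }
}
--------------------------------------------------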
@@ -139,6 +139,9 @@ equivalent to the former `pre_zone` option. Setting `time_zone` to a value like
being applied in the specified time zone. In addition to this, the `pre_zone_adjust_large_interval` option is removed because we
now always return dates and bucket keys in UTC.

`include`/`exclude` filtering on the `terms` aggregation now uses the same syntax as regexp queries instead of the Java syntax. While simple
regexps should still work, more complex ones might need some rewriting. Also, the `flags` parameter is not supported anymore.

=== Terms filter lookup caching

The terms filter lookup mechanism does not support the `cache` option anymore
@@ -371,3 +374,9 @@ over AJAX instead:
http.cors.enabled: true
http.cors.allow-origin: /https?:\/\/localhost(:[0-9]+)?/
---------------

=== Cluster state REST api

The cluster state api doesn't return the `routing_nodes` section anymore when
`routing_table` is requested. The newly introduced `routing_nodes` flag can
be used separately to control whether `routing_nodes` should be returned.
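For context, the `routing_nodes` metric described above is requested through the filtered cluster state endpoint; a minimal sketch of such a call:

[source,shell]
--------------------------------------------------
curl -XGET 'http://localhost:9200/_cluster/state/routing_nodes?pretty'
--------------------------------------------------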
@@ -217,6 +217,8 @@ You can disable that check using `plugins.check_lucene: false`.
[[river]]
==== River Plugins

deprecated[1.5.0,Rivers have been deprecated. See https://www.elastic.co/blog/deprecating_rivers for more details]

.Supported by Elasticsearch
* https://github.com/elasticsearch/elasticsearch-river-couchdb[CouchDB River Plugin]
* https://github.com/elasticsearch/elasticsearch-river-rabbitmq[RabbitMQ River Plugin]
@@ -378,7 +378,7 @@ that can be used for configuring this sandbox:

`script.groovy.sandbox.enabled`::

Flag to enable the sandbox (defaults to `false` added[v1.4.3] meaning the sandbox is
Flag to enable the sandbox (defaults to `false` meaning the sandbox is
disabled).

When specifying whitelist or blacklist settings for the groovy sandbox, all
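As a small illustration of the setting documented above, enabling the Groovy sandbox from `elasticsearch.yml` would look like this (the value shown simply flips the documented default):

[source,yaml]
--------------------------------------------------
script.groovy.sandbox.enabled: true
--------------------------------------------------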
@@ -85,7 +85,6 @@ the JVM. It is automatically enabled when using
`NodeBuilder#local(true)`.

[float]
coming[1.5.0]
=== Transport Tracer

The transport module has a dedicated tracer logger which, when activated, logs incoming and outgoing requests. The log can be dynamically activated
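A sketch of how such a tracer logger could be switched on at runtime via the cluster settings API, assuming it is exposed under the `transport.tracer` logger name (the logger name is an assumption, not confirmed by this diff):

[source,shell]
--------------------------------------------------
curl -XPUT 'http://localhost:9200/_cluster/settings' -d '{
    "transient" : {
        "logger.transport.tracer" : "TRACE"
    }
}'
--------------------------------------------------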
@@ -86,8 +86,6 @@ query. The parameter `boost_mode` defines how:
`max`:: max of query score and function score
`min`:: min of query score and function score

coming[1.5.0]

By default, modifying the score does not change which documents match. To exclude
documents that do not meet a certain score threshold the `min_score` parameter can be set to the desired score threshold.
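To make the `min_score` threshold concrete, a sketch of a `function_score` query using it (the field names and the threshold value are illustrative):

[source,js]
--------------------------------------------------
{
    "query" : {
        "function_score" : {
            "query" : { "match" : { "body" : "elasticsearch" } },
            "functions" : [
                { "field_value_factor" : { "field" : "popularity" } }
            ],
            "min_score" : 5
        }
    }
}
--------------------------------------------------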
@@ -347,8 +347,6 @@ It is hard to say which one of the different heuristics will be the best choice
If none of the above measures suits your use case, then another option is to implement a custom significance measure:

===== scripted
coming[1.5.0]

Customized scores can be implemented via a script:

[source,js]
@@ -482,42 +482,7 @@ with `water_` (so the tag `water_sports` will not be aggregated). The `include` r
values are "allowed" to be aggregated, while the `exclude` determines the values that should not be aggregated. When
both are defined, the `exclude` has precedence, meaning, the `include` is evaluated first and only then the `exclude`.

The regular expressions are based on the Java(TM) http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html[Pattern],
and as such, it is also possible to pass in flags that will determine how the compiled regular expression will work:

[source,js]
--------------------------------------------------
{
    "aggs" : {
        "tags" : {
            "terms" : {
                "field" : "tags",
                "include" : {
                    "pattern" : ".*sport.*",
                    "flags" : "CANON_EQ|CASE_INSENSITIVE" <1>
                },
                "exclude" : {
                    "pattern" : "water_.*",
                    "flags" : "CANON_EQ|CASE_INSENSITIVE"
                }
            }
        }
    }
}
--------------------------------------------------

<1> the flags are concatenated using the `|` character as a separator

The possible flags that can be used are:
http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html#CANON_EQ[`CANON_EQ`],
http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html#CASE_INSENSITIVE[`CASE_INSENSITIVE`],
http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html#COMMENTS[`COMMENTS`],
http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html#DOTALL[`DOTALL`],
http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html#LITERAL[`LITERAL`],
http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html#MULTILINE[`MULTILINE`],
http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html#UNICODE_CASE[`UNICODE_CASE`],
http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html#UNICODE_CHARACTER_CLASS[`UNICODE_CHARACTER_CLASS`] and
http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html#UNIX_LINES[`UNIX_LINES`]
The syntax is the same as <<regexp-syntax,regexp queries>>.

For matching based on exact values the `include` and `exclude` parameters can simply take an array of
strings that represent the terms as they are found in the index:
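For comparison with the removed block above, the replacement syntax drops the `flags` object and passes the regular expression directly, as the surrounding text notes; a sketch reusing the same field and patterns:

[source,js]
--------------------------------------------------
{
    "aggs" : {
        "tags" : {
            "terms" : {
                "field" : "tags",
                "include" : ".*sport.*",
                "exclude" : "water_.*"
            }
        }
    }
}
--------------------------------------------------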
@@ -46,8 +46,6 @@ The above aggregation computes the grades statistics over all documents. The agg
The name of the aggregation (`grades_stats` above) also serves as the key by which the aggregation result can be retrieved from the returned response.

==== Standard Deviation Bounds
added[1.4.3]

By default, the `extended_stats` metric will return an object called `std_deviation_bounds`, which provides an interval of plus/minus two standard
deviations from the mean. This can be a useful way to visualize variance of your data. If you want a different boundary, for example
three standard deviations, you can set `sigma` in the request:

@@ -65,7 +63,7 @@ three standard deviations, you can set `sigma` in the request:
}
}
--------------------------------------------------
<1> `sigma` controls how many standard deviations +/- from the mean should be displayed added[1.4.3]
<1> `sigma` controls how many standard deviations +/- from the mean should be displayed

`sigma` can be any non-negative double, meaning you can request non-integer values such as `1.5`. A value of `0` is valid, but will simply
return the average for both `upper` and `lower` bounds.
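A sketch of the request described above, reusing the `grades_stats` aggregation name from the quoted docs (the field name is an assumption):

[source,js]
--------------------------------------------------
{
    "aggs" : {
        "grades_stats" : {
            "extended_stats" : {
                "field" : "grade",
                "sigma" : 3
            }
        }
    }
}
--------------------------------------------------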
@@ -28,7 +28,7 @@ documentation of the mustache project].
NOTE: The mustache language is implemented in elasticsearch as a sandboxed
scripting language, hence it obeys settings that may be used to enable or
disable scripts per language, source and operation as described in
<<enable-dynamic-scripting, scripting docs>> coming[1.6.0, `mustache` scripts were always on before and it wasn't possible to disable them].
<<enable-dynamic-scripting, scripting docs>>

[float]
==== More template examples
@@ -323,6 +323,6 @@ appender section contains the destinations for the logs. Extensive information
on how to customize logging and all the supported appenders can be found on
the http://logging.apache.org/log4j/1.2/manual.html[log4j documentation].

coming[1.5.0] Additional Appenders and other logging classes provided by
Additional Appenders and other logging classes provided by
http://logging.apache.org/log4j/extras/[log4j-extras] are also available,
out of the box.
pom.xml
@ -893,6 +893,98 @@
|
|||
</resources>
|
||||
</configuration>
|
||||
</execution>
|
||||
<!-- Filters some files and uses packaging.properties when building the .deb package -->
|
||||
<execution>
|
||||
<id>copy-resources-deb</id>
|
||||
<phase>prepare-package</phase>
|
||||
<goals>
|
||||
<goal>copy-resources</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<outputDirectory>${project.build.directory}/generated-packaging/deb/</outputDirectory>
|
||||
<filters>
|
||||
<filter>src/packaging/common/packaging.properties</filter>
|
||||
<filter>src/packaging/deb/packaging.properties</filter>
|
||||
</filters>
|
||||
<resources>
|
||||
<resource>
|
||||
<directory>src/packaging/common/</directory>
|
||||
<filtering>true</filtering>
|
||||
<includes>
|
||||
<include>**/*</include>
|
||||
</includes>
|
||||
<excludes>
|
||||
<exclude>packaging.properties</exclude>
|
||||
</excludes>
|
||||
</resource>
|
||||
<resource>
|
||||
<directory>src/packaging/deb/</directory>
|
||||
<filtering>true</filtering>
|
||||
<includes>
|
||||
<include>**/*</include>
|
||||
</includes>
|
||||
<excludes>
|
||||
<exclude>packaging.properties</exclude>
|
||||
</excludes>
|
||||
</resource>
|
||||
<resource>
|
||||
<directory>${project.basedir}</directory>
|
||||
<filtering>true</filtering>
|
||||
<includes>
|
||||
<include>bin/elasticsearch</include>
|
||||
<include>bin/elasticsearch.in.sh</include>
|
||||
<include>bin/plugin</include>
|
||||
</includes>
|
||||
</resource>
|
||||
</resources>
|
||||
</configuration>
|
||||
</execution>
|
||||
<!-- Filters some files and uses packaging.properties when building the .rpm package -->
|
||||
<execution>
|
||||
<id>copy-resources-rpm</id>
|
||||
<phase>prepare-package</phase>
|
||||
<goals>
|
||||
<goal>copy-resources</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<outputDirectory>${project.build.directory}/generated-packaging/rpm/</outputDirectory>
|
||||
<filters>
|
||||
<filter>src/packaging/common/packaging.properties</filter>
|
||||
<filter>src/packaging/rpm/packaging.properties</filter>
|
||||
</filters>
|
||||
<resources>
|
||||
<resource>
|
||||
<directory>src/packaging/common/</directory>
|
||||
<filtering>true</filtering>
|
||||
<includes>
|
||||
<include>**/*</include>
|
||||
</includes>
|
||||
<excludes>
|
||||
<exclude>packaging.properties</exclude>
|
||||
</excludes>
|
||||
</resource>
|
||||
<resource>
|
||||
<directory>src/packaging/rpm/</directory>
|
||||
<filtering>true</filtering>
|
||||
<includes>
|
||||
<include>**/*</include>
|
||||
</includes>
|
||||
<excludes>
|
||||
<exclude>packaging.properties</exclude>
|
||||
</excludes>
|
||||
</resource>
|
||||
<resource>
|
||||
<directory>${project.basedir}</directory>
|
||||
<filtering>true</filtering>
|
||||
<includes>
|
||||
<include>bin/elasticsearch</include>
|
||||
<include>bin/elasticsearch.in.sh</include>
|
||||
<include>bin/plugin</include>
|
||||
</includes>
|
||||
</resource>
|
||||
</resources>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
|
@ -1101,57 +1193,6 @@
|
|||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<!-- use packaging.properties when building the .deb package -->
|
||||
<artifactId>maven-resources-plugin</artifactId>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>copy-resources-deb</id>
|
||||
<phase>prepare-package</phase>
|
||||
<goals>
|
||||
<goal>copy-resources</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<outputDirectory>${project.build.directory}/generated-packaging/deb/</outputDirectory>
|
||||
<filters>
|
||||
<filter>src/packaging/common/packaging.properties</filter>
|
||||
<filter>src/packaging/deb/packaging.properties</filter>
|
||||
</filters>
|
||||
<resources>
|
||||
<resource>
|
||||
<directory>src/packaging/common/</directory>
|
||||
<filtering>true</filtering>
|
||||
<includes>
|
||||
<include>**/*</include>
|
||||
</includes>
|
||||
<excludes>
|
||||
<exclude>packaging.properties</exclude>
|
||||
</excludes>
|
||||
</resource>
|
||||
<resource>
|
||||
<directory>src/packaging/deb/</directory>
|
||||
<filtering>true</filtering>
|
||||
<includes>
|
||||
<include>**/*</include>
|
||||
</includes>
|
||||
<excludes>
|
||||
<exclude>packaging.properties</exclude>
|
||||
</excludes>
|
||||
</resource>
|
||||
<resource>
|
||||
<directory>${project.basedir}</directory>
|
||||
<filtering>true</filtering>
|
||||
<includes>
|
||||
<include>bin/elasticsearch</include>
|
||||
<include>bin/elasticsearch.in.sh</include>
|
||||
<include>bin/plugin</include>
|
||||
</includes>
|
||||
</resource>
|
||||
</resources>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
|
||||
<!-- Rpm Packaging -->
|
||||
<plugin>
|
||||
|
@ -1364,57 +1405,6 @@
|
|||
</postremoveScriptlet>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<!-- use packaging.properties when building the .rpm package -->
|
||||
<artifactId>maven-resources-plugin</artifactId>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>copy-resources-rpm</id>
|
||||
<phase>prepare-package</phase>
|
||||
<goals>
|
||||
<goal>copy-resources</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<outputDirectory>${project.build.directory}/generated-packaging/rpm/</outputDirectory>
|
||||
<filters>
|
||||
<filter>src/packaging/common/packaging.properties</filter>
|
||||
<filter>src/packaging/rpm/packaging.properties</filter>
|
||||
</filters>
|
||||
<resources>
|
||||
<resource>
|
||||
<directory>src/packaging/common/</directory>
|
||||
<filtering>true</filtering>
|
||||
<includes>
|
||||
<include>**/*</include>
|
||||
</includes>
|
||||
<excludes>
|
||||
<exclude>packaging.properties</exclude>
|
||||
</excludes>
|
||||
</resource>
|
||||
<resource>
|
||||
<directory>src/packaging/rpm/</directory>
|
||||
<filtering>true</filtering>
|
||||
<includes>
|
||||
<include>**/*</include>
|
||||
</includes>
|
||||
<excludes>
|
||||
<exclude>packaging.properties</exclude>
|
||||
</excludes>
|
||||
</resource>
|
||||
<resource>
|
||||
<directory>${project.basedir}</directory>
|
||||
<filtering>true</filtering>
|
||||
<includes>
|
||||
<include>bin/elasticsearch</include>
|
||||
<include>bin/elasticsearch.in.sh</include>
|
||||
<include>bin/plugin</include>
|
||||
</includes>
|
||||
</resource>
|
||||
</resources>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
|
||||
<plugin>
|
||||
<groupId>de.thetaphi</groupId>
|
||||
|
|
|
@@ -16,7 +16,7 @@
    },
    "metric" : {
      "type" : "list",
      "options" : ["_all", "blocks", "metadata", "nodes", "routing_table", "master_node", "version"],
      "options" : ["_all", "blocks", "metadata", "nodes", "routing_table", "routing_nodes", "master_node", "version"],
      "description" : "Limit the information returned to the specified metrics"
    }
  },
@@ -81,6 +81,18 @@ setup:
  - is_false: nodes
  - is_false: metadata
  - is_true: routing_table
  - is_false: routing_nodes

---
"Filtering the cluster state by routing nodes only should work":
  - do:
      cluster.state:
        metric: [ routing_nodes ]

  - is_false: blocks
  - is_false: nodes
  - is_false: metadata
  - is_false: routing_table
  - is_true: routing_nodes

---
@ -8,8 +8,8 @@
|
|||
- match: { _id: "1" }
|
||||
|
||||
- do:
|
||||
get_template:
|
||||
id: 1
|
||||
get_template:
|
||||
id: 1
|
||||
- match: { found: true }
|
||||
- match: { lang: mustache }
|
||||
- match: { _id: "1" }
|
||||
|
@ -17,9 +17,9 @@
|
|||
- match: { template: /.*query\S\S\S\Smatch_all.*/ }
|
||||
|
||||
- do:
|
||||
catch: missing
|
||||
get_template:
|
||||
id: 2
|
||||
catch: missing
|
||||
get_template:
|
||||
id: 2
|
||||
- match: { found: false }
|
||||
- match: { lang: mustache }
|
||||
- match: { _id: "2" }
|
||||
|
@ -27,17 +27,17 @@
|
|||
- is_false: template
|
||||
|
||||
- do:
|
||||
delete_template:
|
||||
id: "1"
|
||||
delete_template:
|
||||
id: "1"
|
||||
- match: { found: true }
|
||||
- match: { _index: ".scripts" }
|
||||
- match: { _id: "1" }
|
||||
- match: { _version: 2}
|
||||
|
||||
- do:
|
||||
catch: missing
|
||||
delete_template:
|
||||
id: "non_existing"
|
||||
catch: missing
|
||||
delete_template:
|
||||
id: "non_existing"
|
||||
- match: { found: false }
|
||||
- match: { _index: ".scripts" }
|
||||
- match: { _id: "non_existing" }
|
||||
|
|
|
@ -23,7 +23,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor;
|
|||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import org.elasticsearch.ElasticsearchIllegalArgumentException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.block.ClusterBlock;
|
||||
import org.elasticsearch.cluster.block.ClusterBlocks;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
|
@ -33,7 +32,6 @@ import org.elasticsearch.cluster.metadata.MetaData;
|
|||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.cluster.routing.*;
|
||||
|
||||
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Strings;
|
||||
|
@ -261,6 +259,7 @@ public class ClusterState implements ToXContent {
|
|||
NODES("nodes"),
|
||||
METADATA("metadata"),
|
||||
ROUTING_TABLE("routing_table"),
|
||||
ROUTING_NODES("routing_nodes"),
|
||||
CUSTOMS("customs");
|
||||
|
||||
private static Map<String, Metric> valueToEnum;
|
||||
|
@ -465,7 +464,7 @@ public class ClusterState implements ToXContent {
|
|||
}
|
||||
|
||||
// routing nodes
|
||||
if (metrics.contains(Metric.ROUTING_TABLE)) {
|
||||
if (metrics.contains(Metric.ROUTING_NODES)) {
|
||||
builder.startObject("routing_nodes");
|
||||
builder.startArray("unassigned");
|
||||
for (ShardRouting shardRouting : readOnlyRoutingNodes().unassigned()) {
|
||||
|
|
|
@ -208,6 +208,9 @@ public class XContentHelper {
|
|||
if (params.paramAsBoolean("pretty", true)) {
|
||||
builder.prettyPrint();
|
||||
}
|
||||
if (params.paramAsBoolean("human", true)) {
|
||||
builder.humanReadable(true);
|
||||
}
|
||||
builder.startObject();
|
||||
toXContent.toXContent(builder, params);
|
||||
builder.endObject();
|
||||
|
|
|
@ -88,7 +88,8 @@ public abstract class Engine implements Closeable {
|
|||
this.engineConfig = engineConfig;
|
||||
this.shardId = engineConfig.getShardId();
|
||||
this.store = engineConfig.getStore();
|
||||
this.logger = Loggers.getLogger(getClass(), engineConfig.getIndexSettings(), engineConfig.getShardId());
|
||||
this.logger = Loggers.getLogger(Engine.class, // we use the engine class directly here to make sure all subclasses have the same logger name
|
||||
engineConfig.getIndexSettings(), engineConfig.getShardId());
|
||||
this.failedEngineListener = engineConfig.getFailedEngineListener();
|
||||
this.deletionPolicy = engineConfig.getDeletionPolicy();
|
||||
}
|
||||
|
|
|
@ -621,7 +621,9 @@ public class InternalEngine extends Engine {
|
|||
long translogId = translogIdGenerator.incrementAndGet();
|
||||
translog.newTransientTranslog(translogId);
|
||||
indexWriter.setCommitData(Collections.singletonMap(Translog.TRANSLOG_ID_KEY, Long.toString(translogId)));
|
||||
logger.trace("starting commit for flush; commitTranslog=true");
|
||||
commitIndexWriter(indexWriter);
|
||||
logger.trace("finished commit for flush");
|
||||
// we need to refresh in order to clear older version values
|
||||
refresh("version_table_flush");
|
||||
// we need to move transient to current only after we refresh
|
||||
|
@ -648,7 +650,9 @@ public class InternalEngine extends Engine {
|
|||
try {
|
||||
long translogId = translog.currentId();
|
||||
indexWriter.setCommitData(Collections.singletonMap(Translog.TRANSLOG_ID_KEY, Long.toString(translogId)));
|
||||
logger.trace("starting commit for flush; commitTranslog=false");
|
||||
commitIndexWriter(indexWriter);
|
||||
logger.trace("finished commit for flush");
|
||||
} catch (Throwable e) {
|
||||
throw new FlushFailedEngineException(shardId, e);
|
||||
}
|
||||
|
@ -770,9 +774,12 @@ public class InternalEngine extends Engine {
|
|||
public SnapshotIndexCommit snapshotIndex() throws EngineException {
|
||||
// we have to flush outside of the readlock otherwise we might have a problem upgrading
|
||||
// the to a write lock when we fail the engine in this operation
|
||||
logger.trace("start flush for snapshot");
|
||||
flush(false, false, true);
|
||||
logger.trace("finish flush for snapshot");
|
||||
try (ReleasableLock lock = readLock.acquire()) {
|
||||
ensureOpen();
|
||||
logger.trace("pulling snapshot");
|
||||
return deletionPolicy.snapshot();
|
||||
} catch (IOException e) {
|
||||
throw new SnapshotFailedEngineException(shardId, e);
|
||||
|
@ -902,6 +909,11 @@ public class InternalEngine extends Engine {
|
|||
if (isClosed.compareAndSet(false, true)) {
|
||||
assert rwl.isWriteLockedByCurrentThread() || failEngineLock.isHeldByCurrentThread() : "Either the write lock must be held or the engine must be currently be failing itself";
|
||||
try {
|
||||
try {
|
||||
translog.sync();
|
||||
} catch (IOException ex) {
|
||||
logger.warn("failed to sync translog");
|
||||
}
|
||||
this.versionMap.clear();
|
||||
logger.trace("close searcherManager");
|
||||
try {
|
||||
|
|
|
@ -488,13 +488,14 @@ public class DateFieldMapper extends NumberFieldMapper<Long> {
|
|||
}
|
||||
|
||||
if (value != null) {
|
||||
final long timestamp = timeUnit.toMillis(value);
|
||||
if (fieldType.indexOptions() != IndexOptions.NONE || fieldType.stored()) {
|
||||
CustomLongNumericField field = new CustomLongNumericField(this, value, fieldType);
|
||||
CustomLongNumericField field = new CustomLongNumericField(this, timestamp, fieldType);
|
||||
field.setBoost(boost);
|
||||
fields.add(field);
|
||||
}
|
||||
if (hasDocValues()) {
|
||||
addDocValue(context, fields, value);
|
||||
addDocValue(context, fields, timestamp);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -553,8 +554,7 @@ public class DateFieldMapper extends NumberFieldMapper<Long> {
|
|||
return dateTimeFormatter.parser().parseMillis(value);
|
||||
} catch (RuntimeException e) {
|
||||
try {
|
||||
long time = Long.parseLong(value);
|
||||
return timeUnit.toMillis(time);
|
||||
return Long.parseLong(value);
|
||||
} catch (NumberFormatException e1) {
|
||||
throw new MapperParsingException("failed to parse date field [" + value + "], tried both date format [" + dateTimeFormatter.format() + "], and timestamp number with locale [" + dateTimeFormatter.locale() + "]", e);
|
||||
}
|
||||
|
|
|
@ -655,6 +655,15 @@ public class RecoveryState implements ToXContent, Streamable {
|
|||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = name.hashCode();
|
||||
result = 31 * result + (int) (length ^ (length >>> 32));
|
||||
result = 31 * result + (int) (recovered ^ (recovered >>> 32));
|
||||
result = 31 * result + (reused ? 1 : 0);
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "file (name [" + name + "], reused [" + reused + "], length [" + length + "], recovered [" + recovered + "])";
|
||||
|
|
|
@ -851,18 +851,18 @@ public class PercolatorService extends AbstractComponent {
|
|||
if (shardResults.size() == 1) {
|
||||
aggregations = shardResults.get(0).aggregations();
|
||||
} else {
|
||||
List<InternalAggregations> aggregationsList = new ArrayList<>(shardResults.size());
|
||||
for (PercolateShardResponse shardResult : shardResults) {
|
||||
aggregationsList.add(shardResult.aggregations());
|
||||
}
|
||||
aggregations = InternalAggregations.reduce(aggregationsList, new ReduceContext(null, bigArrays, scriptService));
|
||||
List<InternalAggregations> aggregationsList = new ArrayList<>(shardResults.size());
|
||||
for (PercolateShardResponse shardResult : shardResults) {
|
||||
aggregationsList.add(shardResult.aggregations());
|
||||
}
|
||||
aggregations = InternalAggregations.reduce(aggregationsList, new ReduceContext(bigArrays, scriptService));
|
||||
}
|
||||
if (aggregations != null) {
|
||||
List<SiblingReducer> reducers = shardResults.get(0).reducers();
|
||||
if (reducers != null) {
|
||||
List<InternalAggregation> newAggs = new ArrayList<>(Lists.transform(aggregations.asList(), Reducer.AGGREGATION_TRANFORM_FUNCTION));
|
||||
for (SiblingReducer reducer : reducers) {
|
||||
InternalAggregation newAgg = reducer.doReduce(new InternalAggregations(newAggs), new ReduceContext(null, bigArrays,
|
||||
InternalAggregation newAgg = reducer.doReduce(new InternalAggregations(newAggs), new ReduceContext(bigArrays,
|
||||
scriptService));
|
||||
newAggs.add(newAgg);
|
||||
}
|
||||
|
|
|
@ -92,6 +92,7 @@ abstract class QueryCollector extends SimpleCollector {
|
|||
context.aggregations().aggregators(aggregators);
|
||||
}
|
||||
aggregatorCollector = BucketCollector.wrap(aggregatorCollectors);
|
||||
aggregatorCollector.preCollection();
|
||||
}
|
||||
|
||||
public void postMatch(int doc) throws IOException {
|
||||
|
|
|
@ -72,7 +72,8 @@ public class RestClusterStateAction extends BaseRestHandler {
|
|||
EnumSet<ClusterState.Metric> metrics = ClusterState.Metric.parseString(request.param("metric"), true);
|
||||
// do not ask for what we do not need.
|
||||
clusterStateRequest.nodes(metrics.contains(ClusterState.Metric.NODES) || metrics.contains(ClusterState.Metric.MASTER_NODE));
|
||||
clusterStateRequest.routingTable(metrics.contains(ClusterState.Metric.ROUTING_TABLE));
|
||||
//there is no distinction in Java api between routing_table and routing_nodes, it's the same info set over the wire, one single flag to ask for it
|
||||
clusterStateRequest.routingTable(metrics.contains(ClusterState.Metric.ROUTING_TABLE) || metrics.contains(ClusterState.Metric.ROUTING_NODES));
|
||||
clusterStateRequest.metaData(metrics.contains(ClusterState.Metric.METADATA));
|
||||
clusterStateRequest.blocks(metrics.contains(ClusterState.Metric.BLOCKS));
|
||||
}
|
||||
|
|
|
@ -23,8 +23,9 @@ import org.elasticsearch.common.logging.ESLogger;
|
|||
import org.elasticsearch.common.logging.Loggers;
|
||||
|
||||
/**
|
||||
*
|
||||
* @deprecated See blog post https://www.elastic.co/blog/deprecating_rivers
|
||||
*/
|
||||
@Deprecated
|
||||
public class AbstractRiverComponent implements RiverComponent {
|
||||
|
||||
protected final ESLogger logger;
|
||||
|
@ -48,4 +49,4 @@ public class AbstractRiverComponent implements RiverComponent {
|
|||
public String nodeName() {
|
||||
return settings.globalSettings().get("name", "");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -22,7 +22,9 @@ package org.elasticsearch.river;
|
|||
/**
|
||||
* Allows to import data into elasticsearch via plugin
|
||||
* Gets allocated on a node and eventually automatically re-allocated if needed
|
||||
* @deprecated See blog post https://www.elastic.co/blog/deprecating_rivers
|
||||
*/
|
||||
@Deprecated
|
||||
public interface River extends RiverComponent {
|
||||
|
||||
/**
|
||||
|
|
|
@ -20,8 +20,9 @@
|
|||
package org.elasticsearch.river;
|
||||
|
||||
/**
|
||||
*
|
||||
* @deprecated See blog post https://www.elastic.co/blog/deprecating_rivers
|
||||
*/
|
||||
@Deprecated
|
||||
public interface RiverComponent {
|
||||
|
||||
RiverName riverName();
|
||||
|
|
|
@ -22,8 +22,9 @@ package org.elasticsearch.river;
|
|||
import java.io.Serializable;
|
||||
|
||||
/**
|
||||
*
|
||||
* @deprecated See blog post https://www.elastic.co/blog/deprecating_rivers
|
||||
*/
|
||||
@Deprecated
|
||||
public class RiverName implements Serializable {
|
||||
|
||||
private final String type;
|
||||
|
|
|
@ -126,6 +126,7 @@ public class RiversService extends AbstractLifecycleComponent<RiversService> {
|
|||
return;
|
||||
}
|
||||
|
||||
logger.info("rivers have been deprecated. Read https://www.elastic.co/blog/deprecating_rivers");
|
||||
logger.debug("creating river [{}][{}]", riverName.type(), riverName.name());
|
||||
|
||||
try {
|
||||
|
|
|
@ -76,23 +76,23 @@ public class AggregationPhase implements SearchPhase {
|
|||
|
||||
List<Aggregator> collectors = new ArrayList<>();
|
||||
Aggregator[] aggregators;
|
||||
List<Reducer> reducers;
|
||||
try {
|
||||
AggregatorFactories factories = context.aggregations().factories();
|
||||
aggregators = factories.createTopLevelAggregators(aggregationContext);
|
||||
reducers = factories.createReducers();
|
||||
for (int i = 0; i < aggregators.length; i++) {
|
||||
if (aggregators[i] instanceof GlobalAggregator == false) {
|
||||
collectors.add(aggregators[i]);
|
||||
}
|
||||
}
|
||||
context.aggregations().aggregators(aggregators);
|
||||
if (!collectors.isEmpty()) {
|
||||
final BucketCollector collector = BucketCollector.wrap(collectors);
|
||||
collector.preCollection();
|
||||
context.searcher().queryCollectors().put(AggregationPhase.class, collector);
|
||||
}
|
||||
} catch (IOException e) {
|
||||
throw new AggregationInitializationException("Could not initialize aggregators", e);
|
||||
}
|
||||
for (int i = 0; i < aggregators.length; i++) {
|
||||
if (aggregators[i] instanceof GlobalAggregator == false) {
|
||||
collectors.add(aggregators[i]);
|
||||
}
|
||||
}
|
||||
context.aggregations().aggregators(aggregators);
|
||||
if (!collectors.isEmpty()) {
|
||||
context.searcher().queryCollectors().put(AggregationPhase.class, (BucketCollector.wrap(collectors)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -118,14 +118,15 @@ public class AggregationPhase implements SearchPhase {
|
|||
|
||||
// optimize the global collector based execution
|
||||
if (!globals.isEmpty()) {
|
||||
BucketCollector collector = BucketCollector.wrap(globals);
|
||||
BucketCollector globalsCollector = BucketCollector.wrap(globals);
|
||||
Query query = new ConstantScoreQuery(Queries.MATCH_ALL_FILTER);
|
||||
Filter searchFilter = context.searchFilter(context.types());
|
||||
if (searchFilter != null) {
|
||||
query = new FilteredQuery(query, searchFilter);
|
||||
}
|
||||
try {
|
||||
context.searcher().search(query, collector);
|
||||
globalsCollector.preCollection();
|
||||
context.searcher().search(query, globalsCollector);
|
||||
} catch (Exception e) {
|
||||
throw new QueryPhaseExecutionException(context, "Failed to execute global aggregators", e);
|
||||
}
|
||||
|
|
|
@ -103,7 +103,12 @@ public abstract class AggregatorBase extends Aggregator {
|
|||
*/
|
||||
@Override
|
||||
public boolean needsScores() {
|
||||
return collectableSubAggregators.needsScores();
|
||||
for (Aggregator agg : subAggregators) {
|
||||
if (agg.needsScores()) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
public Map<String, Object> metaData() {
|
||||
|
@ -153,6 +158,7 @@ public abstract class AggregatorBase extends Aggregator {
|
|||
}
|
||||
collectableSubAggregators = BucketCollector.wrap(collectors);
|
||||
doPreCollection();
|
||||
collectableSubAggregators.preCollection();
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -61,15 +61,6 @@ public class AggregatorFactories {
|
|||
return reducers;
|
||||
}
|
||||
|
||||
private static Aggregator createAndRegisterContextAware(AggregationContext context, AggregatorFactory factory, Aggregator parent, boolean collectsFromSingleBucket) throws IOException {
|
||||
final Aggregator aggregator = factory.create(context, parent, collectsFromSingleBucket);
|
||||
// Once the aggregator is fully constructed perform any initialisation -
|
||||
// can't do everything in constructors if Aggregator base class needs
|
||||
// to delegate to subclasses as part of construction.
|
||||
aggregator.preCollection();
|
||||
return aggregator;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create all aggregators so that they can be consumed with multiple buckets.
|
||||
*/
|
||||
|
@ -81,7 +72,7 @@ public class AggregatorFactories {
|
|||
// propagate the fact that only bucket 0 will be collected with single-bucket
|
||||
// aggs
|
||||
final boolean collectsFromSingleBucket = false;
|
||||
aggregators[i] = createAndRegisterContextAware(parent.context(), factories[i], parent, collectsFromSingleBucket);
|
||||
aggregators[i] = factories[i].create(parent.context(), parent, collectsFromSingleBucket);
|
||||
}
|
||||
return aggregators;
|
||||
}
|
||||
|
@ -92,7 +83,7 @@ public class AggregatorFactories {
|
|||
for (int i = 0; i < factories.length; i++) {
|
||||
// top-level aggs only get called with bucket 0
|
||||
final boolean collectsFromSingleBucket = true;
|
||||
aggregators[i] = createAndRegisterContextAware(ctx, factories[i], null, collectsFromSingleBucket);
|
||||
aggregators[i] = factories[i].create(ctx, null, collectsFromSingleBucket);
|
||||
}
|
||||
return aggregators;
|
||||
}
|
||||
|
|
|
@ -93,20 +93,14 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, St
|
|||
|
||||
public static class ReduceContext {
|
||||
|
||||
private final List<InternalAggregation> aggregations;
|
||||
private final BigArrays bigArrays;
|
||||
private ScriptService scriptService;
|
||||
|
||||
public ReduceContext(List<InternalAggregation> aggregations, BigArrays bigArrays, ScriptService scriptService) {
|
||||
this.aggregations = aggregations;
|
||||
public ReduceContext(BigArrays bigArrays, ScriptService scriptService) {
|
||||
this.bigArrays = bigArrays;
|
||||
this.scriptService = scriptService;
|
||||
}
|
||||
|
||||
public List<InternalAggregation> aggregations() {
|
||||
return aggregations;
|
||||
}
|
||||
|
||||
public BigArrays bigArrays() {
|
||||
return bigArrays;
|
||||
}
|
||||
|
@ -153,15 +147,15 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, St
|
|||
* try reusing an existing get instance (typically the first in the given list) to save on redundant object
|
||||
* construction.
|
||||
*/
|
||||
public final InternalAggregation reduce(ReduceContext reduceContext) {
|
||||
InternalAggregation aggResult = doReduce(reduceContext);
|
||||
public final InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
|
||||
InternalAggregation aggResult = doReduce(aggregations, reduceContext);
|
||||
for (Reducer reducer : reducers) {
|
||||
aggResult = reducer.reduce(aggResult, reduceContext);
|
||||
}
|
||||
return aggResult;
|
||||
}
|
||||
|
||||
public abstract InternalAggregation doReduce(ReduceContext reduceContext);
|
||||
public abstract InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext);
|
||||
|
||||
@Override
|
||||
public Object getProperty(String path) {
|
||||
|
|
|
@ -169,8 +169,7 @@ public class InternalAggregations implements Aggregations, ToXContent, Streamabl
|
|||
for (Map.Entry<String, List<InternalAggregation>> entry : aggByName.entrySet()) {
|
||||
List<InternalAggregation> aggregations = entry.getValue();
|
||||
InternalAggregation first = aggregations.get(0); // the list can't be empty as it's created on demand
|
||||
reducedAggregations.add(first.reduce(new InternalAggregation.ReduceContext(aggregations, context.bigArrays(), context
|
||||
.scriptService())));
|
||||
reducedAggregations.add(first.reduce(aggregations, context));
|
||||
}
|
||||
return new InternalAggregations(reducedAggregations);
|
||||
}
|
||||
|
|
|
@ -73,7 +73,7 @@ public final class DeferringBucketCollector extends BucketCollector {
|
|||
if (collector == null) {
|
||||
throw new ElasticsearchIllegalStateException();
|
||||
}
|
||||
return collector.needsScores();
|
||||
return false;
|
||||
}
|
||||
|
||||
/** Set the deferred collectors. */
|
||||
|
@ -138,6 +138,9 @@ public final class DeferringBucketCollector extends BucketCollector {
|
|||
this.selectedBuckets = hash;
|
||||
|
||||
collector.preCollection();
|
||||
if (collector.needsScores()) {
|
||||
throw new ElasticsearchIllegalStateException("Cannot defer if scores are needed");
|
||||
}
|
||||
|
||||
for (Entry entry : entries) {
|
||||
final LeafBucketCollector leafCollector = collector.getLeafCollector(entry.context);
|
||||
|
|
|
@ -70,8 +70,7 @@ public abstract class InternalSingleBucketAggregation extends InternalAggregatio
|
|||
protected abstract InternalSingleBucketAggregation newAggregation(String name, long docCount, InternalAggregations subAggregations);
|
||||
|
||||
@Override
|
||||
public InternalAggregation doReduce(ReduceContext reduceContext) {
|
||||
List<InternalAggregation> aggregations = reduceContext.aggregations();
|
||||
public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
|
||||
long docCount = 0L;
|
||||
List<InternalAggregations> subAggregationsList = new ArrayList<>(aggregations.size());
|
||||
for (InternalAggregation aggregation : aggregations) {
|
||||
|
|
|
@ -203,8 +203,7 @@ public class InternalFilters extends InternalMultiBucketAggregation<InternalFilt
|
|||
}
|
||||
|
||||
@Override
|
||||
public InternalAggregation doReduce(ReduceContext reduceContext) {
|
||||
List<InternalAggregation> aggregations = reduceContext.aggregations();
|
||||
public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
|
||||
List<List<Bucket>> bucketsList = null;
|
||||
for (InternalAggregation aggregation : aggregations) {
|
||||
InternalFilters filters = (InternalFilters) aggregation;
|
||||
|
|
|
@ -200,8 +200,7 @@ public class InternalGeoHashGrid extends InternalMultiBucketAggregation<Internal
|
|||
}
|
||||
|
||||
@Override
|
||||
public InternalGeoHashGrid doReduce(ReduceContext reduceContext) {
|
||||
List<InternalAggregation> aggregations = reduceContext.aggregations();
|
||||
public InternalGeoHashGrid doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
|
||||
|
||||
LongObjectPagedHashMap<List<Bucket>> buckets = null;
|
||||
for (InternalAggregation aggregation : aggregations) {
|
||||
|
|
|
@ -191,7 +191,7 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
|
|||
|
||||
public ValueFormatter getFormatter() {
|
||||
return formatter;
|
||||
}
|
||||
}
|
||||
|
||||
public boolean getKeyed() {
|
||||
return keyed;
|
||||
|
@ -335,8 +335,7 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
|
|||
|
||||
}
|
||||
|
||||
private List<B> reduceBuckets(ReduceContext reduceContext) {
|
||||
List<InternalAggregation> aggregations = reduceContext.aggregations();
|
||||
private List<B> reduceBuckets(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
|
||||
|
||||
final PriorityQueue<IteratorAndCurrent<B>> pq = new PriorityQueue<IteratorAndCurrent<B>>(aggregations.size()) {
|
||||
@Override
|
||||
|
@ -450,8 +449,8 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
|
|||
}
|
||||
|
||||
@Override
|
||||
public InternalAggregation doReduce(ReduceContext reduceContext) {
|
||||
List<B> reducedBuckets = reduceBuckets(reduceContext);
|
||||
public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
|
||||
List<B> reducedBuckets = reduceBuckets(aggregations, reduceContext);
|
||||
|
||||
// adding empty buckets if needed
|
||||
if (minDocCount == 0) {
|
||||
|
|
|
@ -245,7 +245,7 @@ public class InternalRange<B extends InternalRange.Bucket, R extends InternalRan
|
|||
public R create(List<B> ranges, R prototype) {
|
||||
return (R) new InternalRange<>(prototype.name, ranges, prototype.formatter, prototype.keyed, prototype.reducers(),
|
||||
prototype.metaData);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public B createBucket(InternalAggregations aggregations, B prototype) {
|
||||
|
@ -295,8 +295,7 @@ public class InternalRange<B extends InternalRange.Bucket, R extends InternalRan
|
|||
}
|
||||
|
||||
@Override
|
||||
public InternalAggregation doReduce(ReduceContext reduceContext) {
|
||||
List<InternalAggregation> aggregations = reduceContext.aggregations();
|
||||
public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
|
||||
@SuppressWarnings("unchecked")
|
||||
List<Bucket>[] rangeList = new List[ranges.size()];
|
||||
for (int i = 0; i < rangeList.length; ++i) {
|
||||
|
|
|
@ -50,7 +50,7 @@ public class GlobalOrdinalsSignificantTermsAggregator extends GlobalOrdinalsStri
|
|||
|
||||
public GlobalOrdinalsSignificantTermsAggregator(String name, AggregatorFactories factories,
|
||||
ValuesSource.Bytes.WithOrdinals.FieldData valuesSource, BucketCountThresholds bucketCountThresholds,
|
||||
IncludeExclude includeExclude, AggregationContext aggregationContext, Aggregator parent,
|
||||
IncludeExclude.OrdinalsFilter includeExclude, AggregationContext aggregationContext, Aggregator parent,
|
||||
SignificantTermsAggregatorFactory termsAggFactory, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
|
||||
|
||||
super(name, factories, valuesSource, null, bucketCountThresholds, includeExclude, aggregationContext, parent,
|
||||
|
@ -65,8 +65,8 @@ public class GlobalOrdinalsSignificantTermsAggregator extends GlobalOrdinalsStri
|
|||
@Override
|
||||
public void collect(int doc, long bucket) throws IOException {
|
||||
super.collect(doc, bucket);
|
||||
numCollectedDocs++;
|
||||
}
|
||||
numCollectedDocs++;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
|
@ -152,7 +152,7 @@ public class GlobalOrdinalsSignificantTermsAggregator extends GlobalOrdinalsStri
|
|||
|
||||
private final LongHash bucketOrds;
|
||||
|
||||
public WithHash(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals.FieldData valuesSource, BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude, AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggFactory, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
|
||||
public WithHash(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals.FieldData valuesSource, BucketCountThresholds bucketCountThresholds, IncludeExclude.OrdinalsFilter includeExclude, AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggFactory, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
|
||||
super(name, factories, valuesSource, bucketCountThresholds, includeExclude, aggregationContext, parent, termsAggFactory, reducers, metaData);
|
||||
bucketOrds = new LongHash(1, aggregationContext.bigArrays());
|
||||
}
|
||||
|
@ -164,20 +164,20 @@ public class GlobalOrdinalsSignificantTermsAggregator extends GlobalOrdinalsStri
|
|||
@Override
|
||||
public void collect(int doc, long bucket) throws IOException {
|
||||
assert bucket == 0;
|
||||
numCollectedDocs++;
|
||||
globalOrds.setDocument(doc);
|
||||
final int numOrds = globalOrds.cardinality();
|
||||
for (int i = 0; i < numOrds; i++) {
|
||||
final long globalOrd = globalOrds.ordAt(i);
|
||||
long bucketOrd = bucketOrds.add(globalOrd);
|
||||
if (bucketOrd < 0) {
|
||||
bucketOrd = -1 - bucketOrd;
|
||||
numCollectedDocs++;
|
||||
globalOrds.setDocument(doc);
|
||||
final int numOrds = globalOrds.cardinality();
|
||||
for (int i = 0; i < numOrds; i++) {
|
||||
final long globalOrd = globalOrds.ordAt(i);
|
||||
long bucketOrd = bucketOrds.add(globalOrd);
|
||||
if (bucketOrd < 0) {
|
||||
bucketOrd = -1 - bucketOrd;
|
||||
collectExistingBucket(sub, doc, bucketOrd);
|
||||
} else {
|
||||
} else {
|
||||
collectBucket(sub, doc, bucketOrd);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
|
|
|
@ -160,8 +160,7 @@ public abstract class InternalSignificantTerms<A extends InternalSignificantTerm
|
|||
}
|
||||
|
||||
@Override
|
||||
public InternalAggregation doReduce(ReduceContext reduceContext) {
|
||||
List<InternalAggregation> aggregations = reduceContext.aggregations();
|
||||
public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
|
||||
|
||||
long globalSubsetSize = 0;
|
||||
long globalSupersetSize = 0;
|
||||
|
|
|
@ -49,7 +49,7 @@ public class SignificantStringTermsAggregator extends StringTermsAggregator {
|
|||
|
||||
public SignificantStringTermsAggregator(String name, AggregatorFactories factories, ValuesSource valuesSource,
|
||||
BucketCountThresholds bucketCountThresholds,
|
||||
IncludeExclude includeExclude, AggregationContext aggregationContext, Aggregator parent,
|
||||
IncludeExclude.StringFilter includeExclude, AggregationContext aggregationContext, Aggregator parent,
|
||||
SignificantTermsAggregatorFactory termsAggFactory, List<Reducer> reducers, Map<String, Object> metaData)
|
||||
throws IOException {
|
||||
|
||||
|
|
|
@ -32,7 +32,6 @@ import org.elasticsearch.common.lucene.index.FreqTermsEnum;
|
|||
import org.elasticsearch.index.mapper.FieldMapper;
|
||||
import org.elasticsearch.search.aggregations.AggregationExecutionException;
|
||||
import org.elasticsearch.search.aggregations.Aggregator;
|
||||
import org.elasticsearch.search.aggregations.AggregatorBase;
|
||||
import org.elasticsearch.search.aggregations.AggregatorFactories;
|
||||
import org.elasticsearch.search.aggregations.InternalAggregation;
|
||||
import org.elasticsearch.search.aggregations.NonCollectingAggregator;
|
||||
|
@ -65,10 +64,11 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac
|
|||
|
||||
@Override
|
||||
Aggregator create(String name, AggregatorFactories factories, ValuesSource valuesSource,
|
||||
TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude,
|
||||
TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude,
|
||||
AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggregatorFactory,
|
||||
List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
|
||||
return new SignificantStringTermsAggregator(name, factories, valuesSource, bucketCountThresholds, includeExclude,
|
||||
final IncludeExclude.StringFilter filter = includeExclude == null ? null : includeExclude.convertToStringFilter();
|
||||
return new SignificantStringTermsAggregator(name, factories, valuesSource, bucketCountThresholds, filter,
|
||||
aggregationContext, parent, termsAggregatorFactory, reducers, metaData);
|
||||
}
|
||||
|
||||
|
@ -82,7 +82,8 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac
|
|||
List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
|
||||
ValuesSource.Bytes.WithOrdinals valueSourceWithOrdinals = (ValuesSource.Bytes.WithOrdinals) valuesSource;
|
||||
IndexSearcher indexSearcher = aggregationContext.searchContext().searcher();
|
||||
return new GlobalOrdinalsSignificantTermsAggregator(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, bucketCountThresholds, includeExclude, aggregationContext, parent, termsAggregatorFactory, reducers, metaData);
|
||||
final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();
|
||||
return new GlobalOrdinalsSignificantTermsAggregator(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, bucketCountThresholds, filter, aggregationContext, parent, termsAggregatorFactory, reducers, metaData);
|
||||
}
|
||||
|
||||
},
|
||||
|
@ -93,8 +94,9 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac
|
|||
TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude,
|
||||
AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggregatorFactory,
|
||||
List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
|
||||
final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();
|
||||
return new GlobalOrdinalsSignificantTermsAggregator.WithHash(name, factories,
|
||||
(ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, bucketCountThresholds, includeExclude,
|
||||
(ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, bucketCountThresholds, filter,
|
||||
aggregationContext, parent, termsAggregatorFactory, reducers, metaData);
|
||||
}
|
||||
};
|
||||
|
|
|
@ -57,7 +57,7 @@ public class SignificantTermsParser implements Aggregator.Parser {
|
|||
.scriptable(false)
|
||||
.formattable(true)
|
||||
.build();
|
||||
IncludeExclude.Parser incExcParser = new IncludeExclude.Parser(aggregationName, SignificantStringTerms.TYPE, context);
|
||||
IncludeExclude.Parser incExcParser = new IncludeExclude.Parser();
|
||||
aggParser.parse(aggregationName, parser, context, vsParser, incExcParser);
|
||||
|
||||
TermsAggregator.BucketCountThresholds bucketCountThresholds = aggParser.getBucketCountThresholds();
|
||||
|
|
|
@ -84,10 +84,10 @@ public class UnmappedSignificantTerms extends InternalSignificantTerms<UnmappedS
|
|||
}
|
||||
|
||||
@Override
|
||||
public InternalAggregation doReduce(ReduceContext reduceContext) {
|
||||
for (InternalAggregation aggregation : reduceContext.aggregations()) {
|
||||
public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
|
||||
for (InternalAggregation aggregation : aggregations) {
|
||||
if (!(aggregation instanceof UnmappedSignificantTerms)) {
|
||||
return aggregation.reduce(reduceContext);
|
||||
return aggregation.reduce(aggregations, reduceContext);
|
||||
}
|
||||
}
|
||||
return this;
|
||||
|
|
|
@ -59,7 +59,7 @@ import java.util.Map;
|
|||
public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggregator {
|
||||
|
||||
protected final ValuesSource.Bytes.WithOrdinals.FieldData valuesSource;
|
||||
protected final IncludeExclude includeExclude;
|
||||
protected final IncludeExclude.OrdinalsFilter includeExclude;
|
||||
|
||||
// TODO: cache the acceptedglobalValues per aggregation definition.
|
||||
// We can't cache this yet in ValuesSource, since ValuesSource is reused per field for aggs during the execution.
|
||||
|
@ -73,7 +73,7 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
|
|||
|
||||
public GlobalOrdinalsStringTermsAggregator(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals.FieldData valuesSource,
|
||||
Terms.Order order, BucketCountThresholds bucketCountThresholds,
|
||||
IncludeExclude includeExclude, AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
|
||||
IncludeExclude.OrdinalsFilter includeExclude, AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
|
||||
super(name, factories, aggregationContext, parent, order, bucketCountThresholds, collectionMode, showTermDocCountError, reducers,
|
||||
metaData);
|
||||
this.valuesSource = valuesSource;
|
||||
|
@ -265,7 +265,7 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
private final LongHash bucketOrds;

public WithHash(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals.FieldData valuesSource,
Terms.Order order, BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude, AggregationContext aggregationContext,
Terms.Order order, BucketCountThresholds bucketCountThresholds, IncludeExclude.OrdinalsFilter includeExclude, AggregationContext aggregationContext,
Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
super(name, factories, valuesSource, order, bucketCountThresholds, includeExclude, aggregationContext, parent, collectionMode, showTermDocCountError, reducers, metaData);
bucketOrds = new LongHash(1, aggregationContext.bigArrays());
@ -416,7 +416,7 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
}
final long ord = i - 1; // remember we do +1 when counting
final long globalOrd = mapping == null ? ord : mapping.getGlobalOrd(ord);
incrementBucketDocCount(globalOrd, inc);
incrementBucketDocCount(globalOrd, inc);
}
}
}
@ -166,8 +166,7 @@ public abstract class InternalTerms<A extends InternalTerms, B extends InternalT
}

@Override
public InternalAggregation doReduce(ReduceContext reduceContext) {
List<InternalAggregation> aggregations = reduceContext.aggregations();
public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {

Multimap<Object, InternalTerms.Bucket> buckets = ArrayListMultimap.create();
long sumDocCountError = 0;
@ -47,11 +47,11 @@ public class StringTermsAggregator extends AbstractStringTermsAggregator {
private final ValuesSource valuesSource;
protected final BytesRefHash bucketOrds;
private final IncludeExclude includeExclude;
private final IncludeExclude.StringFilter includeExclude;

public StringTermsAggregator(String name, AggregatorFactories factories, ValuesSource valuesSource,
Terms.Order order, BucketCountThresholds bucketCountThresholds,
IncludeExclude includeExclude, AggregationContext aggregationContext,
IncludeExclude.StringFilter includeExclude, AggregationContext aggregationContext,
Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, List<Reducer> reducers,
Map<String, Object> metaData) throws IOException {
@ -45,7 +45,7 @@ public abstract class TermsAggregator extends BucketsAggregator {
private Explicit<Long> shardMinDocCount;
private Explicit<Integer> requiredSize;
private Explicit<Integer> shardSize;

public BucketCountThresholds(long minDocCount, long shardMinDocCount, int requiredSize, int shardSize) {
this.minDocCount = new Explicit<>(minDocCount, false);
this.shardMinDocCount = new Explicit<>(shardMinDocCount, false);
@ -159,7 +159,9 @@ public abstract class TermsAggregator extends BucketsAggregator {
@Override
protected boolean shouldDefer(Aggregator aggregator) {
return (collectMode == SubAggCollectionMode.BREADTH_FIRST) && (!aggsUsedForSorting.contains(aggregator));
return collectMode == SubAggCollectionMode.BREADTH_FIRST
&& aggregator.needsScores() == false
&& !aggsUsedForSorting.contains(aggregator);
}

}
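Note: the shouldDefer() change above means breadth_first collection no longer defers sub-aggregators that need scores. A minimal usage sketch, not part of this patch, of opting into breadth_first collection through the Java API; the client, index and field names here are hypothetical:

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;

import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;

public class BreadthFirstSketch {
    // Assumes an existing Client; "idx", "category" and "user" are made-up names.
    static SearchResponse collectBreadthFirst(Client client) {
        return client.prepareSearch("idx")
                .addAggregation(terms("categories").field("category")
                        .collectMode(SubAggCollectionMode.BREADTH_FIRST) // sub-aggs are deferred unless they need scores
                        .subAggregation(terms("users").field("user")))
                .execute().actionGet();
    }
}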
@ -52,7 +52,8 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory<Values
Terms.Order order, TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude,
AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode,
boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
return new StringTermsAggregator(name, factories, valuesSource, order, bucketCountThresholds, includeExclude,
final IncludeExclude.StringFilter filter = includeExclude == null ? null : includeExclude.convertToStringFilter();
return new StringTermsAggregator(name, factories, valuesSource, order, bucketCountThresholds, filter,
aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData);
}
@ -68,7 +69,8 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory<Values
Aggregator create(String name, AggregatorFactories factories, ValuesSource valuesSource,
Terms.Order order, TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude,
AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
return new GlobalOrdinalsStringTermsAggregator(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, includeExclude, aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData);
final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();
return new GlobalOrdinalsStringTermsAggregator(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, filter, aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData);
}

@Override
@ -83,7 +85,8 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory<Values
Aggregator create(String name, AggregatorFactories factories, ValuesSource valuesSource,
Terms.Order order, TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude,
AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
return new GlobalOrdinalsStringTermsAggregator.WithHash(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, includeExclude, aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData);
final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();
return new GlobalOrdinalsStringTermsAggregator.WithHash(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, filter, aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData);
}

@Override
@ -19,6 +19,7 @@
package org.elasticsearch.search.aggregations.bucket.terms;

import org.apache.lucene.util.automaton.RegExp;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
@ -37,9 +38,7 @@ public class TermsBuilder extends ValuesSourceAggregationBuilder<TermsBuilder> {
private Terms.ValueType valueType;
private Terms.Order order;
private String includePattern;
private int includeFlags;
private String excludePattern;
private int excludeFlags;
private String executionHint;
private SubAggCollectionMode collectionMode;
private Boolean showTermDocCountError;
@ -88,26 +87,15 @@ public class TermsBuilder extends ValuesSourceAggregationBuilder<TermsBuilder> {
/**
* Define a regular expression that will determine what terms should be aggregated. The regular expression is based
* on the {@link java.util.regex.Pattern} class.
* on the {@link RegExp} class.
*
* @see #include(String, int)
* @see {@link RegExp#RegExp(String)}
*/
public TermsBuilder include(String regex) {
return include(regex, 0);
}

/**
* Define a regular expression that will determine what terms should be aggregated. The regular expression is based
* on the {@link java.util.regex.Pattern} class.
*
* @see java.util.regex.Pattern#compile(String, int)
*/
public TermsBuilder include(String regex, int flags) {
if (includeTerms != null) {
throw new ElasticsearchIllegalArgumentException("exclude clause must be an array of strings or a regex, not both");
}
this.includePattern = regex;
this.includeFlags = flags;
return this;
}
@ -160,29 +148,18 @@ public class TermsBuilder extends ValuesSourceAggregationBuilder<TermsBuilder> {
}
return termsAsString;
}

/**
* Define a regular expression that will filter out terms that should be excluded from the aggregation. The regular
* expression is based on the {@link java.util.regex.Pattern} class.
*
* @see #exclude(String, int)
*/
public TermsBuilder exclude(String regex) {
return exclude(regex, 0);
}

/**
* Define a regular expression that will filter out terms that should be excluded from the aggregation. The regular
* expression is based on the {@link java.util.regex.Pattern} class.
* expression is based on the {@link RegExp} class.
*
* @see java.util.regex.Pattern#compile(String, int)
* @see {@link RegExp#RegExp(String)}
*/
public TermsBuilder exclude(String regex, int flags) {
public TermsBuilder exclude(String regex) {
if (excludeTerms != null) {
throw new ElasticsearchIllegalArgumentException("exclude clause must be an array of exact values or a regex, not both");
}
this.excludePattern = regex;
this.excludeFlags = flags;
return this;
}
@ -287,27 +264,13 @@ public class TermsBuilder extends ValuesSourceAggregationBuilder<TermsBuilder> {
builder.array("include", includeTerms);
}
if (includePattern != null) {
if (includeFlags == 0) {
builder.field("include", includePattern);
} else {
builder.startObject("include")
.field("pattern", includePattern)
.field("flags", includeFlags)
.endObject();
}
builder.field("include", includePattern);
}
if (excludeTerms != null) {
builder.array("exclude", excludeTerms);
}
if (excludePattern != null) {
if (excludeFlags == 0) {
builder.field("exclude", excludePattern);
} else {
builder.startObject("exclude")
.field("pattern", excludePattern)
.field("flags", excludeFlags)
.endObject();
}
builder.field("exclude", excludePattern);
}
return builder;
}
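Note: after this change include()/exclude() accept a single Lucene RegExp pattern and the Pattern-flags overloads are gone. A small usage sketch, not part of this patch; the aggregation, field and pattern names are hypothetical:

import org.elasticsearch.search.aggregations.bucket.terms.TermsBuilder;

import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;

public class IncludeExcludeUsageSketch {
    // Lucene RegExp syntax, no flags argument any more.
    static TermsBuilder tagsWithoutTemporary() {
        return terms("tags")
                .field("tag")
                .include("env_.*")      // keep terms matching the include pattern
                .exclude("env_tmp.*");  // drop terms matching the exclude pattern
    }
}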
@ -46,7 +46,7 @@ public class TermsParser implements Aggregator.Parser {
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
TermsParametersParser aggParser = new TermsParametersParser();
ValuesSourceParser vsParser = ValuesSourceParser.any(aggregationName, StringTerms.TYPE, context).scriptable(true).formattable(true).build();
IncludeExclude.Parser incExcParser = new IncludeExclude.Parser(aggregationName, StringTerms.TYPE, context);
IncludeExclude.Parser incExcParser = new IncludeExclude.Parser();
aggParser.parse(aggregationName, parser, context, vsParser, incExcParser);

List<OrderElement> orderElements = aggParser.getOrderElements();
@ -99,10 +99,10 @@ public class UnmappedTerms extends InternalTerms<UnmappedTerms, InternalTerms.Bu
}

@Override
public InternalAggregation doReduce(ReduceContext reduceContext) {
for (InternalAggregation agg : reduceContext.aggregations()) {
public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
for (InternalAggregation agg : aggregations) {
if (!(agg instanceof UnmappedTerms)) {
return agg.reduce(reduceContext);
return agg.reduce(aggregations, reduceContext);
}
}
return this;
@ -20,22 +20,30 @@ package org.elasticsearch.search.aggregations.bucket.terms.support;
import com.carrotsearch.hppc.LongOpenHashSet;
import com.carrotsearch.hppc.LongSet;

import org.apache.lucene.index.RandomAccessOrds;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.*;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LongBitSet;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.automaton.Automata;
import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.ByteRunAutomaton;
import org.apache.lucene.util.automaton.CompiledAutomaton;
import org.apache.lucene.util.automaton.Operations;
import org.apache.lucene.util.automaton.RegExp;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.internal.SearchContext;

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.SortedSet;
import java.util.TreeSet;

/**
* Defines the include/exclude regular expression filtering for string terms aggregation. In this filtering logic,
@ -43,8 +51,8 @@ import java.util.regex.Pattern;
*/
public class IncludeExclude {

// The includeValue and excludeValue ByteRefs which are the result of the parsing
// process are converted into a LongFilter when used on numeric fields
// The includeValue and excludeValue ByteRefs which are the result of the parsing
// process are converted into a LongFilter when used on numeric fields
// in the index.
public static class LongFilter {
private LongSet valids;
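Note: the rewritten IncludeExclude in the hunk below derives its filters from Lucene automata (the include automaton minus the exclude automaton). A self-contained sketch of that idea, assuming the standard Lucene automaton API; the patterns and class name are made up for illustration:

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.ByteRunAutomaton;
import org.apache.lucene.util.automaton.Operations;
import org.apache.lucene.util.automaton.RegExp;

public class AutomatonFilterSketch {
    public static void main(String[] args) {
        Automaton include = new RegExp("s.*").toAutomaton();   // terms to keep
        Automaton exclude = new RegExp("sf.*").toAutomaton();  // terms to drop
        Automaton accepted = Operations.minus(include, exclude, Operations.DEFAULT_MAX_DETERMINIZED_STATES);
        ByteRunAutomaton run = new ByteRunAutomaton(accepted);  // per-term membership test on UTF-8 bytes

        BytesRef kept = new BytesRef("search");
        BytesRef dropped = new BytesRef("sfo");
        System.out.println(run.run(kept.bytes, kept.offset, kept.length));       // true
        System.out.println(run.run(dropped.bytes, dropped.offset, dropped.length)); // false
    }
}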
@ -72,152 +80,145 @@ public class IncludeExclude {
|
|||
}
|
||||
}
|
||||
|
||||
private final Matcher include;
|
||||
private final Matcher exclude;
|
||||
private final CharsRefBuilder scratch = new CharsRefBuilder();
|
||||
private Set<BytesRef> includeValues;
|
||||
private Set<BytesRef> excludeValues;
|
||||
private final boolean hasRegexTest;
|
||||
// Only used for the 'map' execution mode (ie. scripts)
|
||||
public static class StringFilter {
|
||||
|
||||
private final ByteRunAutomaton runAutomaton;
|
||||
|
||||
private StringFilter(Automaton automaton) {
|
||||
this.runAutomaton = new ByteRunAutomaton(automaton);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns whether the given value is accepted based on the {@code include} & {@code exclude} patterns.
|
||||
*/
|
||||
public boolean accept(BytesRef value) {
|
||||
return runAutomaton.run(value.bytes, value.offset, value.length);
|
||||
}
|
||||
}
|
||||
|
||||
public static class OrdinalsFilter {
|
||||
|
||||
private final CompiledAutomaton compiled;
|
||||
|
||||
private OrdinalsFilter(Automaton automaton) {
|
||||
this.compiled = new CompiledAutomaton(automaton);
|
||||
}
|
||||
|
||||
/**
|
||||
* Computes which global ordinals are accepted by this IncludeExclude instance.
|
||||
*/
|
||||
public LongBitSet acceptedGlobalOrdinals(RandomAccessOrds globalOrdinals, ValuesSource.Bytes.WithOrdinals valueSource) throws IOException {
|
||||
LongBitSet acceptedGlobalOrdinals = new LongBitSet(globalOrdinals.getValueCount());
|
||||
TermsEnum globalTermsEnum;
|
||||
Terms globalTerms = new DocValuesTerms(globalOrdinals);
|
||||
// TODO: specialize based on compiled.type: for ALL and prefixes (sinkState >= 0 ) we can avoid i/o and just set bits.
|
||||
globalTermsEnum = compiled.getTermsEnum(globalTerms);
|
||||
for (BytesRef term = globalTermsEnum.next(); term != null; term = globalTermsEnum.next()) {
|
||||
acceptedGlobalOrdinals.set(globalTermsEnum.ord());
|
||||
}
|
||||
return acceptedGlobalOrdinals;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private final RegExp include, exclude;
|
||||
private final SortedSet<BytesRef> includeValues, excludeValues;
|
||||
|
||||
/**
|
||||
* @param include The regular expression pattern for the terms to be included
|
||||
* (may only be {@code null} if one of the other arguments is none-null.
|
||||
* @param includeValues The terms to be included
|
||||
* (may only be {@code null} if one of the other arguments is none-null.
|
||||
* @param exclude The regular expression pattern for the terms to be excluded
|
||||
* (may only be {@code null} if one of the other arguments is none-null.
|
||||
* @param excludeValues The terms to be excluded
|
||||
* (may only be {@code null} if one of the other arguments is none-null.
|
||||
*/
|
||||
public IncludeExclude(Pattern include, Pattern exclude, Set<BytesRef> includeValues, Set<BytesRef> excludeValues) {
|
||||
assert includeValues != null || include != null ||
|
||||
exclude != null || excludeValues != null : "includes & excludes cannot both be null"; // otherwise IncludeExclude object should be null
|
||||
this.include = include != null ? include.matcher("") : null;
|
||||
this.exclude = exclude != null ? exclude.matcher("") : null;
|
||||
hasRegexTest = include != null || exclude != null;
|
||||
public IncludeExclude(RegExp include, RegExp exclude) {
|
||||
if (include == null && exclude == null) {
|
||||
throw new IllegalArgumentException();
|
||||
}
|
||||
this.include = include;
|
||||
this.exclude = exclude;
|
||||
this.includeValues = null;
|
||||
this.excludeValues = null;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param includeValues The terms to be included
|
||||
* @param excludeValues The terms to be excluded
|
||||
*/
|
||||
public IncludeExclude(SortedSet<BytesRef> includeValues, SortedSet<BytesRef> excludeValues) {
|
||||
if (includeValues == null && excludeValues == null) {
|
||||
throw new IllegalArgumentException();
|
||||
}
|
||||
this.include = null;
|
||||
this.exclude = null;
|
||||
this.includeValues = includeValues;
|
||||
this.excludeValues = excludeValues;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns whether the given value is accepted based on the {@code include} & {@code exclude} patterns.
|
||||
* Terms adapter around doc values.
|
||||
*/
|
||||
public boolean accept(BytesRef value) {
|
||||
private static class DocValuesTerms extends Terms {
|
||||
|
||||
if (hasRegexTest) {
|
||||
// We need to perform UTF8 to UTF16 conversion for use in the regex matching
|
||||
scratch.copyUTF8Bytes(value);
|
||||
}
|
||||
return isIncluded(value, scratch.get()) && !isExcluded(value, scratch.get());
|
||||
}
|
||||
|
||||
private boolean isIncluded(BytesRef value, CharsRef utf16Chars) {
|
||||
private final SortedSetDocValues values;
|
||||
|
||||
if ((includeValues == null) && (include == null)) {
|
||||
// No include criteria to be tested.
|
||||
return true;
|
||||
DocValuesTerms(SortedSetDocValues values) {
|
||||
this.values = values;
|
||||
}
|
||||
|
||||
if (include != null) {
|
||||
if (include.reset(scratch.get()).matches()) {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public TermsEnum iterator(TermsEnum reuse) throws IOException {
|
||||
return values.termsEnum();
|
||||
}
|
||||
if (includeValues != null) {
|
||||
if (includeValues.contains(value)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long size() throws IOException {
|
||||
return -1;
|
||||
}
|
||||
// Some include criteria was tested but no match found
|
||||
return false;
|
||||
}
|
||||
|
||||
private boolean isExcluded(BytesRef value, CharsRef utf16Chars) {
|
||||
if (exclude != null) {
|
||||
if (exclude.reset(scratch.get()).matches()) {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getSumTotalTermFreq() throws IOException {
|
||||
return -1;
|
||||
}
|
||||
if (excludeValues != null) {
|
||||
if (excludeValues.contains(value)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getSumDocFreq() throws IOException {
|
||||
return -1;
|
||||
}
|
||||
// No exclude criteria was tested or no match found
|
||||
return false;
|
||||
|
||||
@Override
|
||||
public int getDocCount() throws IOException {
|
||||
return -1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hasFreqs() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hasOffsets() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hasPositions() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hasPayloads() {
|
||||
return false;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Computes which global ordinals are accepted by this IncludeExclude instance.
|
||||
*/
|
||||
public LongBitSet acceptedGlobalOrdinals(RandomAccessOrds globalOrdinals, ValuesSource.Bytes.WithOrdinals valueSource) {
|
||||
LongBitSet acceptedGlobalOrdinals = new LongBitSet(globalOrdinals.getValueCount());
|
||||
// There are 3 ways of populating this bitset:
|
||||
// 1) Looking up the global ordinals for known "include" terms
|
||||
// 2) Looking up the global ordinals for known "exclude" terms
|
||||
// 3) Traversing the term enum for all terms and running past regexes
|
||||
// Option 3 is known to be very slow in the case of high-cardinality fields and
|
||||
// should be avoided if possible.
|
||||
if (includeValues != null) {
|
||||
// optimize for the case where the set of accepted values is a set
|
||||
// of known terms, not a regex that would have to be tested against all terms in the index
|
||||
for (BytesRef includeValue : includeValues) {
|
||||
// We need to perform UTF8 to UTF16 conversion for use in the regex matching
|
||||
scratch.copyUTF8Bytes(includeValue);
|
||||
if (!isExcluded(includeValue, scratch.get())) {
|
||||
long ord = globalOrdinals.lookupTerm(includeValue);
|
||||
if (ord >= 0) {
|
||||
acceptedGlobalOrdinals.set(ord);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if(hasRegexTest) {
|
||||
// We have includeVals that are a regex or only regex excludes - we need to do the potentially
|
||||
// slow option of hitting termsEnum for every term in the index.
|
||||
TermsEnum globalTermsEnum = globalOrdinals.termsEnum();
|
||||
try {
|
||||
for (BytesRef term = globalTermsEnum.next(); term != null; term = globalTermsEnum.next()) {
|
||||
if (accept(term)) {
|
||||
acceptedGlobalOrdinals.set(globalTermsEnum.ord());
|
||||
}
|
||||
}
|
||||
} catch (IOException e) {
|
||||
throw ExceptionsHelper.convertToElastic(e);
|
||||
}
|
||||
} else {
|
||||
// we only have a set of known values to exclude - create a bitset with all good values and negate the known bads
|
||||
acceptedGlobalOrdinals.set(0, acceptedGlobalOrdinals.length());
|
||||
for (BytesRef excludeValue : excludeValues) {
|
||||
long ord = globalOrdinals.lookupTerm(excludeValue);
|
||||
if (ord >= 0) {
|
||||
acceptedGlobalOrdinals.clear(ord);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
return acceptedGlobalOrdinals;
|
||||
}
|
||||
|
||||
|
||||
public static class Parser {
|
||||
|
||||
private final String aggName;
|
||||
private final InternalAggregation.Type aggType;
|
||||
private final SearchContext context;
|
||||
|
||||
String include = null;
|
||||
int includeFlags = 0; // 0 means no flags
|
||||
String exclude = null;
|
||||
int excludeFlags = 0; // 0 means no flags
|
||||
Set<BytesRef> includeValues;
|
||||
Set<BytesRef> excludeValues;
|
||||
|
||||
public Parser(String aggName, InternalAggregation.Type aggType, SearchContext context) {
|
||||
this.aggName = aggName;
|
||||
this.aggType = aggType;
|
||||
this.context = context;
|
||||
}
|
||||
SortedSet<BytesRef> includeValues;
|
||||
SortedSet<BytesRef> excludeValues;
|
||||
|
||||
public boolean token(String currentFieldName, XContentParser.Token token, XContentParser parser) throws IOException {
|
||||
|
||||
|
@ -231,14 +232,14 @@ public class IncludeExclude {
|
|||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
if (token == XContentParser.Token.START_ARRAY) {
|
||||
if ("include".equals(currentFieldName)) {
|
||||
includeValues = parseArrayToSet(parser);
|
||||
includeValues = new TreeSet<>(parseArrayToSet(parser));
|
||||
return true;
|
||||
}
|
||||
}
|
||||
if ("exclude".equals(currentFieldName)) {
|
||||
excludeValues = parseArrayToSet(parser);
|
||||
excludeValues = new TreeSet<>(parseArrayToSet(parser));
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
@ -252,12 +253,6 @@ public class IncludeExclude {
|
|||
} else if (token == XContentParser.Token.VALUE_STRING) {
|
||||
if ("pattern".equals(currentFieldName)) {
|
||||
include = parser.text();
|
||||
} else if ("flags".equals(currentFieldName)) {
|
||||
includeFlags = Regex.flagsFromString(parser.text());
|
||||
}
|
||||
} else if (token == XContentParser.Token.VALUE_NUMBER) {
|
||||
if ("flags".equals(currentFieldName)) {
|
||||
includeFlags = parser.intValue();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -268,12 +263,6 @@ public class IncludeExclude {
|
|||
} else if (token == XContentParser.Token.VALUE_STRING) {
|
||||
if ("pattern".equals(currentFieldName)) {
|
||||
exclude = parser.text();
|
||||
} else if ("flags".equals(currentFieldName)) {
|
||||
excludeFlags = Regex.flagsFromString(parser.text());
|
||||
}
|
||||
} else if (token == XContentParser.Token.VALUE_NUMBER) {
|
||||
if ("flags".equals(currentFieldName)) {
|
||||
excludeFlags = parser.intValue();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -298,19 +287,50 @@ public class IncludeExclude {
|
|||
}
|
||||
return set;
|
||||
}
|
||||
|
||||
|
||||
public IncludeExclude includeExclude() {
|
||||
if (include == null && exclude == null && includeValues == null && excludeValues == null) {
|
||||
RegExp includePattern = include != null ? new RegExp(include) : null;
|
||||
RegExp excludePattern = exclude != null ? new RegExp(exclude) : null;
|
||||
if (includePattern != null || excludePattern != null) {
|
||||
if (includeValues != null || excludeValues != null) {
|
||||
throw new ElasticsearchIllegalArgumentException("Can only use regular expression include/exclude or a set of values, not both");
|
||||
}
|
||||
return new IncludeExclude(includePattern, excludePattern);
|
||||
} else if (includeValues != null || excludeValues != null) {
|
||||
return new IncludeExclude(includeValues, excludeValues);
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
Pattern includePattern = include != null ? Pattern.compile(include, includeFlags) : null;
|
||||
Pattern excludePattern = exclude != null ? Pattern.compile(exclude, excludeFlags) : null;
|
||||
return new IncludeExclude(includePattern, excludePattern, includeValues, excludeValues);
|
||||
}
|
||||
}
|
||||
|
||||
public boolean isRegexBased() {
|
||||
return hasRegexTest;
|
||||
return include != null || exclude != null;
|
||||
}
|
||||
|
||||
private Automaton toAutomaton() {
|
||||
Automaton a = null;
|
||||
if (include != null) {
|
||||
a = include.toAutomaton();
|
||||
} else if (includeValues != null) {
|
||||
a = Automata.makeStringUnion(includeValues);
|
||||
} else {
|
||||
a = Automata.makeAnyString();
|
||||
}
|
||||
if (exclude != null) {
|
||||
a = Operations.minus(a, exclude.toAutomaton(), Operations.DEFAULT_MAX_DETERMINIZED_STATES);
|
||||
} else if (excludeValues != null) {
|
||||
a = Operations.minus(a, Automata.makeStringUnion(excludeValues), Operations.DEFAULT_MAX_DETERMINIZED_STATES);
|
||||
}
|
||||
return a;
|
||||
}
|
||||
|
||||
public StringFilter convertToStringFilter() {
|
||||
return new StringFilter(toAutomaton());
|
||||
}
|
||||
|
||||
public OrdinalsFilter convertToOrdinalsFilter() {
|
||||
return new OrdinalsFilter(toAutomaton());
|
||||
}
|
||||
|
||||
public LongFilter convertToLongFilter() {
|
||||
|
@ -329,6 +349,7 @@ public class IncludeExclude {
|
|||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
public LongFilter convertToDoubleFilter() {
|
||||
int numValids = includeValues == null ? 0 : includeValues.size();
|
||||
int numInvalids = excludeValues == null ? 0 : excludeValues.size();
|
||||
|
|
|
@ -82,10 +82,10 @@ public class InternalAvg extends InternalNumericMetricsAggregation.SingleValue i
}

@Override
public InternalAvg doReduce(ReduceContext reduceContext) {
public InternalAvg doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
long count = 0;
double sum = 0;
for (InternalAggregation aggregation : reduceContext.aggregations()) {
for (InternalAggregation aggregation : aggregations) {
count += ((InternalAvg) aggregation).count;
sum += ((InternalAvg) aggregation).sum;
}
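Note: InternalAvg reduces by summing per-shard counts and sums rather than averaging shard averages. A toy, self-contained illustration (not part of this patch, values are made up) of why that distinction matters:

public class AvgReduceSketch {
    public static void main(String[] args) {
        long[] counts = {2, 8};        // hypothetical per-shard doc counts
        double[] sums = {10.0, 30.0};  // hypothetical per-shard sums
        long count = 0;
        double sum = 0;
        for (int i = 0; i < counts.length; i++) {
            count += counts[i];
            sum += sums[i];
        }
        System.out.println(sum / count);               // 40 / 10 = 4.0, the correct global average
        System.out.println((10.0 / 2 + 30.0 / 8) / 2); // 4.375, what naively averaging averages would give
    }
}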
@ -101,8 +101,7 @@ public final class InternalCardinality extends InternalNumericMetricsAggregation
|
|||
}
|
||||
|
||||
@Override
|
||||
public InternalAggregation doReduce(ReduceContext reduceContext) {
|
||||
List<InternalAggregation> aggregations = reduceContext.aggregations();
|
||||
public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
|
||||
InternalCardinality reduced = null;
|
||||
for (InternalAggregation aggregation : aggregations) {
|
||||
final InternalCardinality cardinality = (InternalCardinality) aggregation;
|
||||
|
|
|
@ -75,7 +75,7 @@ public class InternalGeoBounds extends InternalMetricsAggregation implements Geo
|
|||
}
|
||||
|
||||
@Override
|
||||
public InternalAggregation doReduce(ReduceContext reduceContext) {
|
||||
public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
|
||||
double top = Double.NEGATIVE_INFINITY;
|
||||
double bottom = Double.POSITIVE_INFINITY;
|
||||
double posLeft = Double.POSITIVE_INFINITY;
|
||||
|
@ -83,7 +83,7 @@ public class InternalGeoBounds extends InternalMetricsAggregation implements Geo
|
|||
double negLeft = Double.POSITIVE_INFINITY;
|
||||
double negRight = Double.NEGATIVE_INFINITY;
|
||||
|
||||
for (InternalAggregation aggregation : reduceContext.aggregations()) {
|
||||
for (InternalAggregation aggregation : aggregations) {
|
||||
InternalGeoBounds bounds = (InternalGeoBounds) aggregation;
|
||||
|
||||
if (bounds.top > top) {
|
||||
|
|
|
@ -79,9 +79,9 @@ public class InternalMax extends InternalNumericMetricsAggregation.SingleValue i
|
|||
}
|
||||
|
||||
@Override
|
||||
public InternalMax doReduce(ReduceContext reduceContext) {
|
||||
public InternalMax doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
|
||||
double max = Double.NEGATIVE_INFINITY;
|
||||
for (InternalAggregation aggregation : reduceContext.aggregations()) {
|
||||
for (InternalAggregation aggregation : aggregations) {
|
||||
max = Math.max(max, ((InternalMax) aggregation).max);
|
||||
}
|
||||
return new InternalMax(name, max, valueFormatter, reducers(), getMetaData());
|
||||
|
|
|
@ -80,9 +80,9 @@ public class InternalMin extends InternalNumericMetricsAggregation.SingleValue i
|
|||
}
|
||||
|
||||
@Override
|
||||
public InternalMin doReduce(ReduceContext reduceContext) {
|
||||
public InternalMin doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
|
||||
double min = Double.POSITIVE_INFINITY;
|
||||
for (InternalAggregation aggregation : reduceContext.aggregations()) {
|
||||
for (InternalAggregation aggregation : aggregations) {
|
||||
min = Math.min(min, ((InternalMin) aggregation).min);
|
||||
}
|
||||
return new InternalMin(getName(), min, this.valueFormatter, reducers(), getMetaData());
|
||||
|
|
|
@ -62,8 +62,7 @@ abstract class AbstractInternalPercentiles extends InternalNumericMetricsAggrega
|
|||
public abstract double value(double key);
|
||||
|
||||
@Override
|
||||
public AbstractInternalPercentiles doReduce(ReduceContext reduceContext) {
|
||||
List<InternalAggregation> aggregations = reduceContext.aggregations();
|
||||
public AbstractInternalPercentiles doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
|
||||
TDigestState merged = null;
|
||||
for (InternalAggregation aggregation : aggregations) {
|
||||
final AbstractInternalPercentiles percentiles = (AbstractInternalPercentiles) aggregation;
|
||||
|
|
|
@ -83,13 +83,13 @@ public class InternalScriptedMetric extends InternalMetricsAggregation implement
|
|||
}
|
||||
|
||||
@Override
|
||||
public InternalAggregation doReduce(ReduceContext reduceContext) {
|
||||
public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
|
||||
List<Object> aggregationObjects = new ArrayList<>();
|
||||
for (InternalAggregation aggregation : reduceContext.aggregations()) {
|
||||
for (InternalAggregation aggregation : aggregations) {
|
||||
InternalScriptedMetric mapReduceAggregation = (InternalScriptedMetric) aggregation;
|
||||
aggregationObjects.add(mapReduceAggregation.aggregation());
|
||||
}
|
||||
InternalScriptedMetric firstAggregation = ((InternalScriptedMetric) reduceContext.aggregations().get(0));
|
||||
InternalScriptedMetric firstAggregation = ((InternalScriptedMetric) aggregations.get(0));
|
||||
Object aggregation;
|
||||
if (firstAggregation.reduceScript != null) {
|
||||
Map<String, Object> params;
|
||||
|
|
|
@ -151,12 +151,12 @@ public class InternalStats extends InternalNumericMetricsAggregation.MultiValue
|
|||
}
|
||||
|
||||
@Override
|
||||
public InternalStats doReduce(ReduceContext reduceContext) {
|
||||
public InternalStats doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
|
||||
long count = 0;
|
||||
double min = Double.POSITIVE_INFINITY;
|
||||
double max = Double.NEGATIVE_INFINITY;
|
||||
double sum = 0;
|
||||
for (InternalAggregation aggregation : reduceContext.aggregations()) {
|
||||
for (InternalAggregation aggregation : aggregations) {
|
||||
InternalStats stats = (InternalStats) aggregation;
|
||||
count += stats.getCount();
|
||||
min = Math.min(min, stats.getMin());
|
||||
|
|
|
@ -146,13 +146,13 @@ public class InternalExtendedStats extends InternalStats implements ExtendedStat
|
|||
}
|
||||
|
||||
@Override
|
||||
public InternalExtendedStats doReduce(ReduceContext reduceContext) {
|
||||
public InternalExtendedStats doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
|
||||
double sumOfSqrs = 0;
|
||||
for (InternalAggregation aggregation : reduceContext.aggregations()) {
|
||||
for (InternalAggregation aggregation : aggregations) {
|
||||
InternalExtendedStats stats = (InternalExtendedStats) aggregation;
|
||||
sumOfSqrs += stats.getSumOfSquares();
|
||||
}
|
||||
final InternalStats stats = super.doReduce(reduceContext);
|
||||
final InternalStats stats = super.doReduce(aggregations, reduceContext);
|
||||
return new InternalExtendedStats(name, stats.getCount(), stats.getSum(), stats.getMin(), stats.getMax(), sumOfSqrs, sigma,
|
||||
valueFormatter, reducers(), getMetaData());
|
||||
}
|
||||
|
|
|
@ -79,9 +79,9 @@ public class InternalSum extends InternalNumericMetricsAggregation.SingleValue i
|
|||
}
|
||||
|
||||
@Override
|
||||
public InternalSum doReduce(ReduceContext reduceContext) {
|
||||
public InternalSum doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
|
||||
double sum = 0;
|
||||
for (InternalAggregation aggregation : reduceContext.aggregations()) {
|
||||
for (InternalAggregation aggregation : aggregations) {
|
||||
sum += ((InternalSum) aggregation).sum;
|
||||
}
|
||||
return new InternalSum(name, sum, valueFormatter, reducers(), getMetaData());
|
||||
|
|
|
@ -87,8 +87,7 @@ public class InternalTopHits extends InternalMetricsAggregation implements TopHi
|
|||
}
|
||||
|
||||
@Override
|
||||
public InternalAggregation doReduce(ReduceContext reduceContext) {
|
||||
List<InternalAggregation> aggregations = reduceContext.aggregations();
|
||||
public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
|
||||
InternalSearchHits[] shardHits = new InternalSearchHits[aggregations.size()];
|
||||
|
||||
final TopDocs reducedTopDocs;
|
||||
|
@ -98,8 +97,8 @@ public class InternalTopHits extends InternalMetricsAggregation implements TopHi
|
|||
if (topDocs instanceof TopFieldDocs) {
|
||||
Sort sort = new Sort(((TopFieldDocs) topDocs).fields);
|
||||
shardDocs = new TopFieldDocs[aggregations.size()];
|
||||
for (int i = 0; i < shardDocs.length; i++) {
|
||||
InternalTopHits topHitsAgg = (InternalTopHits) aggregations.get(i);
|
||||
for (int i = 0; i < shardDocs.length; i++) {
|
||||
InternalTopHits topHitsAgg = (InternalTopHits) aggregations.get(i);
|
||||
shardDocs[i] = (TopFieldDocs) topHitsAgg.topDocs;
|
||||
shardHits[i] = topHitsAgg.searchHits;
|
||||
}
|
||||
|
@ -108,11 +107,11 @@ public class InternalTopHits extends InternalMetricsAggregation implements TopHi
|
|||
shardDocs = new TopDocs[aggregations.size()];
|
||||
for (int i = 0; i < shardDocs.length; i++) {
|
||||
InternalTopHits topHitsAgg = (InternalTopHits) aggregations.get(i);
|
||||
shardDocs[i] = topHitsAgg.topDocs;
|
||||
shardHits[i] = topHitsAgg.searchHits;
|
||||
}
|
||||
shardDocs[i] = topHitsAgg.topDocs;
|
||||
shardHits[i] = topHitsAgg.searchHits;
|
||||
}
|
||||
reducedTopDocs = TopDocs.merge(from, size, shardDocs);
|
||||
}
|
||||
}
|
||||
|
||||
final int[] tracker = new int[shardHits.length];
|
||||
InternalSearchHit[] hits = new InternalSearchHit[reducedTopDocs.scoreDocs.length];
|
||||
|
|
|
@ -79,9 +79,9 @@ public class InternalValueCount extends InternalNumericMetricsAggregation.Single
|
|||
}
|
||||
|
||||
@Override
|
||||
public InternalAggregation doReduce(ReduceContext reduceContext) {
|
||||
public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
|
||||
long valueCount = 0;
|
||||
for (InternalAggregation aggregation : reduceContext.aggregations()) {
|
||||
for (InternalAggregation aggregation : aggregations) {
|
||||
valueCount += ((InternalValueCount) aggregation).value;
|
||||
}
|
||||
return new InternalValueCount(name, valueCount, valueFormatter, reducers(), getMetaData());
|
||||
|
|
|
@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
|
|||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.search.aggregations.AggregationStreams;
|
||||
import org.elasticsearch.search.aggregations.InternalAggregation;
|
||||
import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation;
|
||||
import org.elasticsearch.search.aggregations.metrics.max.InternalMax;
|
||||
import org.elasticsearch.search.aggregations.support.format.ValueFormatter;
|
||||
|
@ -75,7 +76,7 @@ public class InternalSimpleValue extends InternalNumericMetricsAggregation.Singl
|
|||
}
|
||||
|
||||
@Override
|
||||
public InternalMax doReduce(ReduceContext reduceContext) {
|
||||
public InternalMax doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
|
||||
throw new UnsupportedOperationException("Not supported");
|
||||
}
|
||||
|
||||
|
|
|
@ -83,7 +83,7 @@ public class InternalBucketMetricValue extends InternalNumericMetricsAggregation
|
|||
}
|
||||
|
||||
@Override
|
||||
public InternalAggregation doReduce(ReduceContext reduceContext) {
|
||||
public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
|
||||
throw new UnsupportedOperationException("Not supported");
|
||||
}
|
||||
|
||||
|
|
|
@ -403,7 +403,7 @@ public class SearchPhaseController extends AbstractComponent {
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
aggregationsList.add((InternalAggregations) entry.value.queryResult().aggregations());
}
aggregations = InternalAggregations.reduce(aggregationsList, new ReduceContext(null, bigArrays, scriptService));
aggregations = InternalAggregations.reduce(aggregationsList, new ReduceContext(bigArrays, scriptService));
}
}
@ -412,7 +412,7 @@ public class SearchPhaseController extends AbstractComponent {
if (reducers != null) {
List<InternalAggregation> newAggs = new ArrayList<>(Lists.transform(aggregations.asList(), Reducer.AGGREGATION_TRANFORM_FUNCTION));
for (SiblingReducer reducer : reducers) {
InternalAggregation newAgg = reducer.doReduce(new InternalAggregations(newAggs), new ReduceContext(null, bigArrays,
InternalAggregation newAgg = reducer.doReduce(new InternalAggregations(newAggs), new ReduceContext(bigArrays,
scriptService));
newAggs.add(newAgg);
}
@ -0,0 +1,130 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.benchmark.search.aggregations;
|
||||
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
|
||||
import org.elasticsearch.action.bulk.BulkRequestBuilder;
|
||||
import org.elasticsearch.action.bulk.BulkResponse;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.common.StopWatch;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.index.query.QueryBuilders;
|
||||
import org.elasticsearch.node.Node;
|
||||
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static org.elasticsearch.client.Requests.createIndexRequest;
|
||||
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
|
||||
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
|
||||
import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
|
||||
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
|
||||
import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
|
||||
import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
|
||||
|
||||
public class IncludeExcludeAggregationSearchBenchmark {
|
||||
|
||||
private static final Random R = new Random();
|
||||
private static final String CLUSTER_NAME = IncludeExcludeAggregationSearchBenchmark.class.getSimpleName();
|
||||
private static final int NUM_DOCS = 10000000;
|
||||
private static final int BATCH = 100;
|
||||
private static final int WARM = 3;
|
||||
private static final int RUNS = 10;
|
||||
private static final int ITERS = 3;
|
||||
|
||||
public static void main(String[] args) {
|
||||
Settings settings = settingsBuilder()
|
||||
.put("index.refresh_interval", "-1")
|
||||
.put(SETTING_NUMBER_OF_SHARDS, 1)
|
||||
.put(SETTING_NUMBER_OF_REPLICAS, 0)
|
||||
.build();
|
||||
|
||||
Node[] nodes = new Node[1];
|
||||
for (int i = 0; i < nodes.length; i++) {
|
||||
nodes[i] = nodeBuilder().clusterName(CLUSTER_NAME)
|
||||
.settings(settingsBuilder().put(settings).put("name", "node" + i))
|
||||
.node();
|
||||
}
|
||||
|
||||
Node clientNode = nodeBuilder()
|
||||
.clusterName(CLUSTER_NAME)
|
||||
.settings(settingsBuilder().put(settings).put("name", "client")).client(true).node();
|
||||
|
||||
Client client = clientNode.client();
|
||||
|
||||
try {
|
||||
client.admin().indices().create(createIndexRequest("index").settings(settings).mapping("type",
|
||||
jsonBuilder().startObject().startObject("type").startObject("properties")
|
||||
.startObject("str")
|
||||
.field("type", "string")
|
||||
.field("index", "not_analyzed")
|
||||
.endObject()
|
||||
.endObject().endObject().endObject())).actionGet();
|
||||
|
||||
System.out.println("Indexing " + NUM_DOCS + " documents");
|
||||
|
||||
StopWatch stopWatch = new StopWatch().start();
|
||||
for (int i = 0; i < NUM_DOCS; ) {
|
||||
BulkRequestBuilder request = client.prepareBulk();
|
||||
for (int j = 0; j < BATCH && i < NUM_DOCS; ++j) {
|
||||
request.add(client.prepareIndex("index", "type", Integer.toString(i)).setSource("str", TestUtil.randomSimpleString(R)));
|
||||
++i;
|
||||
}
|
||||
BulkResponse response = request.execute().actionGet();
|
||||
if (response.hasFailures()) {
|
||||
System.err.println("--> failures...");
|
||||
System.err.println(response.buildFailureMessage());
|
||||
}
|
||||
if ((i % 100000) == 0) {
|
||||
System.out.println("--> Indexed " + i + " took " + stopWatch.stop().lastTaskTime());
|
||||
stopWatch.start();
|
||||
}
|
||||
}
|
||||
|
||||
client.admin().indices().prepareRefresh("index").execute().actionGet();
|
||||
} catch (Exception e) {
|
||||
System.out.println("Index already exists, skipping index creation");
|
||||
}
|
||||
|
||||
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
|
||||
if (clusterHealthResponse.isTimedOut()) {
|
||||
System.err.println("--> Timed out waiting for cluster health");
|
||||
}
|
||||
|
||||
for (int i = 0; i < WARM + RUNS; ++i) {
|
||||
if (i >= WARM) {
|
||||
System.out.println("RUN " + (i - WARM));
|
||||
}
|
||||
long start = System.nanoTime();
|
||||
SearchResponse resp = null;
|
||||
for (int j = 0; j < ITERS; ++j) {
|
||||
resp = client.prepareSearch("index").setQuery(QueryBuilders.prefixQuery("str", "sf")).setSize(0).addAggregation(terms("t").field("str").include("s.*")).execute().actionGet();
|
||||
}
|
||||
long end = System.nanoTime();
|
||||
if (i >= WARM) {
|
||||
System.out.println(new TimeValue((end - start) / ITERS, TimeUnit.NANOSECONDS));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@ -31,11 +31,14 @@ import org.elasticsearch.action.search.SearchResponse;
|
|||
import org.elasticsearch.common.io.FileSystemUtils;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.index.engine.EngineConfig;
|
||||
import org.elasticsearch.index.merge.policy.MergePolicyModule;
|
||||
import org.elasticsearch.index.query.FilterBuilders;
|
||||
import org.elasticsearch.index.query.QueryBuilders;
|
||||
import org.elasticsearch.indices.recovery.RecoverySettings;
|
||||
import org.elasticsearch.node.Node;
|
||||
import org.elasticsearch.rest.action.admin.indices.upgrade.UpgradeTest;
|
||||
import org.elasticsearch.search.SearchHit;
|
||||
|
@ -46,6 +49,7 @@ import org.elasticsearch.search.sort.SortOrder;
|
|||
import org.elasticsearch.test.ElasticsearchIntegrationTest;
|
||||
import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
|
||||
import org.elasticsearch.test.index.merge.NoMergePolicyProvider;
|
||||
import org.elasticsearch.test.junit.annotations.TestLogging;
|
||||
import org.elasticsearch.test.rest.client.http.HttpRequestBuilder;
|
||||
import org.hamcrest.Matchers;
|
||||
import org.junit.AfterClass;
|
||||
|
@ -97,11 +101,12 @@ public class OldIndexBackwardsCompatibilityTests extends ElasticsearchIntegratio
|
|||
return ImmutableSettings.builder()
|
||||
.put(Node.HTTP_ENABLED, true) // for _upgrade
|
||||
.put(MergePolicyModule.MERGE_POLICY_TYPE_KEY, NoMergePolicyProvider.class) // disable merging so no segments will be upgraded
|
||||
.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, 30) // increase recovery speed for small files
|
||||
.build();
|
||||
}
|
||||
|
||||
void setupCluster() throws Exception {
|
||||
ListenableFuture<List<String>> replicas = internalCluster().startNodesAsync(2); // for replicas
|
||||
ListenableFuture<List<String>> replicas = internalCluster().startNodesAsync(1); // for replicas
|
||||
|
||||
Path dataDir = newTempDirPath(LifecycleScope.SUITE);
|
||||
ImmutableSettings.Builder nodeSettings = ImmutableSettings.builder()
|
||||
|
@ -153,6 +158,7 @@ public class OldIndexBackwardsCompatibilityTests extends ElasticsearchIntegratio
|
|||
}
|
||||
|
||||
void unloadIndex(String indexName) throws Exception {
|
||||
client().admin().indices().prepareFlush(indexName).setWaitIfOngoing(true).setForce(true).get(); // temporary for debugging
|
||||
ElasticsearchAssertions.assertAcked(client().admin().indices().prepareDelete(indexName).get());
|
||||
ElasticsearchAssertions.assertAllFilesClosed();
|
||||
}
|
||||
|
@ -190,12 +196,25 @@ public class OldIndexBackwardsCompatibilityTests extends ElasticsearchIntegratio
|
|||
}
|
||||
}
|
||||
|
||||
@LuceneTestCase.AwaitsFix(bugUrl = "times out often , see : https://github.com/elastic/elasticsearch/issues/10434")
|
||||
public void testOldIndexes() throws Exception {
|
||||
setupCluster();
|
||||
|
||||
Collections.shuffle(indexes, getRandom());
|
||||
for (String index : indexes) {
|
||||
if (index.equals("index-0.90.13.zip") == false) {
|
||||
long startTime = System.currentTimeMillis();
|
||||
logger.info("--> Testing old index " + index);
|
||||
assertOldIndexWorks(index);
|
||||
logger.info("--> Done testing " + index + ", took " + ((System.currentTimeMillis() - startTime) / 1000.0) + " seconds");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@TestLogging("test.engine:TRACE,index.engine:TRACE,test.engine.lucene:TRACE,index.engine.lucene:TRACE")
|
||||
public void testShitSlowIndex() throws Exception {
|
||||
setupCluster();
|
||||
for (int i = 0; i < 5; i++) {
|
||||
String index = "index-0.90.13.zip";
|
||||
long startTime = System.currentTimeMillis();
|
||||
logger.info("--> Testing old index " + index);
|
||||
assertOldIndexWorks(index);
|
||||
|
@ -295,12 +314,15 @@ public class OldIndexBackwardsCompatibilityTests extends ElasticsearchIntegratio
|
|||
}
|
||||
|
||||
void assertNewReplicasWork(String indexName) throws Exception {
|
||||
final int numReplicas = randomIntBetween(1, 2);
|
||||
logger.debug("Creating [{}] replicas for index [{}]", numReplicas, indexName);
|
||||
final int numReplicas = 1;
|
||||
final long startTime = System.currentTimeMillis();
|
||||
logger.debug("--> creating [{}] replicas for index [{}]", numReplicas, indexName);
|
||||
assertAcked(client().admin().indices().prepareUpdateSettings(indexName).setSettings(ImmutableSettings.builder()
|
||||
.put("number_of_replicas", numReplicas)
|
||||
).execute().actionGet());
|
||||
ensureGreen(indexName);
|
||||
ensureGreen(TimeValue.timeValueMinutes(1), indexName);
|
||||
logger.debug("--> index [{}] is green, took [{}]", indexName, TimeValue.timeValueMillis(System.currentTimeMillis() - startTime));
|
||||
logger.debug("--> recovery status:\n{}", XContentHelper.toString(client().admin().indices().prepareRecoveries(indexName).get()));
|
||||
|
||||
// TODO: do something with the replicas! query? index?
|
||||
}
|
||||
|
|
|
@ -22,6 +22,7 @@ package org.elasticsearch.index.mapper.date;
|
|||
import org.apache.lucene.analysis.NumericTokenStream.NumericTermAttribute;
|
||||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.index.DocValuesType;
|
||||
import org.apache.lucene.index.IndexableField;
|
||||
import org.apache.lucene.search.Filter;
|
||||
import org.apache.lucene.search.NumericRangeFilter;
|
||||
import org.elasticsearch.ElasticsearchIllegalArgumentException;
|
||||
|
@ -38,10 +39,12 @@ import org.elasticsearch.index.mapper.DocumentMapperParser;
|
|||
import org.elasticsearch.index.mapper.FieldMapper;
|
||||
import org.elasticsearch.index.mapper.MapperParsingException;
|
||||
import org.elasticsearch.index.mapper.ParseContext;
|
||||
import org.elasticsearch.index.mapper.ParseContext.Document;
|
||||
import org.elasticsearch.index.mapper.ParsedDocument;
|
||||
import org.elasticsearch.index.mapper.core.DateFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.LongFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.StringFieldMapper;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
import org.elasticsearch.test.ElasticsearchSingleNodeTest;
|
||||
import org.elasticsearch.test.TestSearchContext;
|
||||
|
@ -55,8 +58,8 @@ import java.util.List;
|
|||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.elasticsearch.index.mapper.string.SimpleStringMappingTests.docValuesType;
|
||||
import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
|
||||
import static org.elasticsearch.index.mapper.string.SimpleStringMappingTests.docValuesType;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.hasKey;
|
||||
import static org.hamcrest.Matchers.instanceOf;
|
||||
|
@ -396,4 +399,37 @@ public class SimpleDateMappingTests extends ElasticsearchSingleNodeTest {
|
|||
assertThat(dateFieldMapperMap.get("field"), is(instanceOf(Map.class)));
|
||||
return (Map<String, String>) dateFieldMapperMap.get("field");
|
||||
}
|
||||
|
||||
private static long getDateAsMillis(Document doc, String field) {
|
||||
for (IndexableField f : doc.getFields(field)) {
|
||||
if (f.numericValue() != null) {
|
||||
return f.numericValue().longValue();
|
||||
}
|
||||
}
|
||||
throw new AssertionError("missing");
|
||||
}
|
||||
|
||||
public void testNumericResolution() throws Exception {
|
||||
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
|
||||
.startObject("properties").startObject("date_field").field("type", "date").field("format", "date_time").field("numeric_resolution", "seconds").endObject().endObject()
|
||||
.endObject().endObject().string();
|
||||
|
||||
DocumentMapper defaultMapper = mapper(mapping);
|
||||
|
||||
// provided as an int
|
||||
ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
|
||||
.startObject()
|
||||
.field("date_field", 42)
|
||||
.endObject()
|
||||
.bytes());
|
||||
assertThat(getDateAsMillis(doc.rootDoc(), "date_field"), equalTo(42000L));
|
||||
|
||||
// provided as a string
|
||||
doc = defaultMapper.parse("type", "2", XContentFactory.jsonBuilder()
|
||||
.startObject()
|
||||
.field("date_field", "43")
|
||||
.endObject()
|
||||
.bytes());
|
||||
assertThat(getDateAsMillis(doc.rootDoc(), "date_field"), equalTo(43000L));
|
||||
}
|
||||
}
|
|
@@ -507,4 +507,23 @@ public class RecoveryStateTest extends ElasticsearchTestCase {
        readWriteIndex.join();
        assertThat(readWriteIndex.error.get(), equalTo(null));
    }

    @Test
    public void testFileHashCodeAndEquals() {
        File f = new File("foo", randomIntBetween(0, 100), randomBoolean());
        File anotherFile = new File(f.name(), f.length(), f.reused());
        assertEquals(f, anotherFile);
        assertEquals(f.hashCode(), anotherFile.hashCode());
        int iters = randomIntBetween(10, 100);
        for (int i = 0; i < iters; i++) {
            f = new File("foo", randomIntBetween(0, 100), randomBoolean());
            anotherFile = new File(f.name(), randomIntBetween(0, 100), randomBoolean());
            if (f.equals(anotherFile)) {
                assertEquals(f.hashCode(), anotherFile.hashCode());
            } else if (f.hashCode() != anotherFile.hashCode()) {
                assertFalse(f.equals(anotherFile));
            }
        }

    }
}

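The randomized loop above checks both directions of the hashCode/equals contract for RecoveryState.File: equal objects must report equal hash codes, and differing hash codes must imply inequality. A standalone sketch of the same property, using a hypothetical record as a stand-in for File (requires Java 16+ for records; everything here is illustrative, not part of the change):

public class HashContractSketch {
    // FileStub is a made-up stand-in; records derive equals/hashCode from their components.
    record FileStub(String name, long length, boolean reused) {}

    public static void main(String[] args) {
        FileStub a = new FileStub("foo", 7, true);
        FileStub b = new FileStub(a.name(), a.length(), a.reused());
        if (!a.equals(b) || a.hashCode() != b.hashCode()) {
            throw new AssertionError("equal objects must share a hash code");
        }

        FileStub c = new FileStub("foo", 8, false);
        if (a.hashCode() != c.hashCode() && a.equals(c)) {
            throw new AssertionError("different hash codes must imply inequality");
        }
        System.out.println("contract holds for the sampled values");
    }
}
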
@@ -42,7 +42,10 @@ import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory
import org.elasticsearch.search.aggregations.metrics.sum.Sum;
import org.elasticsearch.test.ElasticsearchIntegrationTest;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;

import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;

@@ -67,7 +70,7 @@ import static org.hamcrest.core.IsNull.notNullValue;
 * the growth of dynamic arrays is tested.
 */
@Slow
public class RandomTests extends ElasticsearchIntegrationTest {
public class EquivalenceTests extends ElasticsearchIntegrationTest {

    // Make sure that unordered, reversed, disjoint and/or overlapping ranges are supported
    // Duel with filters

@@ -380,4 +383,56 @@ public class RandomTests extends ElasticsearchIntegrationTest {
        assertEquals(value >= 6 ? value : 0, sum.getValue(), 0d);
    }

    private void assertEquals(Terms t1, Terms t2) {
        List<Terms.Bucket> t1Buckets = t1.getBuckets();
        List<Terms.Bucket> t2Buckets = t2.getBuckets();
        assertEquals(t1Buckets.size(), t2Buckets.size());
        for (Iterator<Terms.Bucket> it1 = t1Buckets.iterator(), it2 = t2Buckets.iterator(); it1.hasNext(); ) {
            final Terms.Bucket b1 = it1.next();
            final Terms.Bucket b2 = it2.next();
            assertEquals(b1.getDocCount(), b2.getDocCount());
            assertEquals(b1.getKey(), b2.getKey());
        }
    }

    public void testDuelDepthBreadthFirst() throws Exception {
        createIndex("idx");
        final int numDocs = randomIntBetween(100, 500);
        List<IndexRequestBuilder> reqs = new ArrayList<>();
        for (int i = 0; i < numDocs; ++i) {
            final int v1 = randomInt(1 << randomInt(7));
            final int v2 = randomInt(1 << randomInt(7));
            final int v3 = randomInt(1 << randomInt(7));
            reqs.add(client().prepareIndex("idx", "type").setSource("f1", v1, "f2", v2, "f3", v3));
        }
        indexRandom(true, reqs);

        final SearchResponse r1 = client().prepareSearch("idx").addAggregation(
                terms("f1").field("f1").collectMode(SubAggCollectionMode.DEPTH_FIRST)
                        .subAggregation(terms("f2").field("f2").collectMode(SubAggCollectionMode.DEPTH_FIRST)
                                .subAggregation(terms("f3").field("f3").collectMode(SubAggCollectionMode.DEPTH_FIRST)))).get();
        assertSearchResponse(r1);
        final SearchResponse r2 = client().prepareSearch("idx").addAggregation(
                terms("f1").field("f1").collectMode(SubAggCollectionMode.BREADTH_FIRST)
                        .subAggregation(terms("f2").field("f2").collectMode(SubAggCollectionMode.BREADTH_FIRST)
                                .subAggregation(terms("f3").field("f3").collectMode(SubAggCollectionMode.BREADTH_FIRST)))).get();
        assertSearchResponse(r2);

        final Terms t1 = r1.getAggregations().get("f1");
        final Terms t2 = r2.getAggregations().get("f1");
        assertEquals(t1, t2);
        for (Terms.Bucket b1 : t1.getBuckets()) {
            final Terms.Bucket b2 = t2.getBucketByKey(b1.getKeyAsString());
            final Terms sub1 = b1.getAggregations().get("f2");
            final Terms sub2 = b2.getAggregations().get("f2");
            assertEquals(sub1, sub2);
            for (Terms.Bucket subB1 : sub1.getBuckets()) {
                final Terms.Bucket subB2 = sub2.getBucketByKey(subB1.getKeyAsString());
                final Terms subSub1 = subB1.getAggregations().get("f3");
                final Terms subSub2 = subB2.getAggregations().get("f3");
                assertEquals(subSub1, subSub2);
            }
        }
    }

}

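For context on the duel above: depth_first collects sub-aggregations in the same pass that builds the parent buckets, while breadth_first defers the sub-aggregations and fills them in a second pass over the documents that fall into the surviving parent buckets. Either order of work must yield the same bucket tree, which is what the nested assertEquals calls verify. A toy sketch of that equivalence, entirely outside Elasticsearch (data and maps below are invented for illustration and are not the real implementation):

import java.util.*;

// Toy illustration (NOT the Elasticsearch implementation) of why the two collect
// modes can be duelled: single-pass and two-pass collection build the same tree.
public class CollectModeToy {
    public static void main(String[] args) {
        List<int[]> docs = Arrays.asList(new int[]{1, 10}, new int[]{1, 20}, new int[]{2, 10});

        // "depth_first": build parent and child counts in a single pass over the documents.
        Map<Integer, Map<Integer, Integer>> depthFirst = new HashMap<>();
        for (int[] d : docs) {
            depthFirst.computeIfAbsent(d[0], k -> new HashMap<>())
                      .merge(d[1], 1, Integer::sum);
        }

        // "breadth_first": first pass records parent buckets only, second pass fills children.
        Map<Integer, Map<Integer, Integer>> breadthFirst = new HashMap<>();
        for (int[] d : docs) {
            breadthFirst.putIfAbsent(d[0], new HashMap<>());
        }
        for (int[] d : docs) {
            breadthFirst.get(d[0]).merge(d[1], 1, Integer::sum);
        }

        System.out.println(depthFirst.equals(breadthFirst)); // prints true
    }
}
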
@@ -387,86 +387,6 @@ public class StringTermsTests extends AbstractTermsTests {
        }
    }

    @Test
    public void singleValueField_WithRegexFiltering_WithFlags() throws Exception {

        // include without exclude
        // we should be left with: val000, val001, val002, val003, val004, val005, val006, val007, val008, val009
        // with case insensitive flag on the include regex

        SearchResponse response = client().prepareSearch("idx").setTypes("high_card_type")
                .addAggregation(terms("terms")
                        .executionHint(randomExecutionHint())
                        .field(SINGLE_VALUED_FIELD_NAME)
                        .collectMode(randomFrom(SubAggCollectionMode.values())).include("VAL00.+", Pattern.CASE_INSENSITIVE))
                .execute().actionGet();

        assertSearchResponse(response);

        Terms terms = response.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        assertThat(terms.getName(), equalTo("terms"));
        assertThat(terms.getBuckets().size(), equalTo(10));

        for (int i = 0; i < 10; i++) {
            Terms.Bucket bucket = terms.getBucketByKey("val00" + i);
            assertThat(bucket, notNullValue());
            assertThat(key(bucket), equalTo("val00" + i));
            assertThat(bucket.getDocCount(), equalTo(1l));
        }

        // include and exclude
        // we should be left with: val002, val003, val004, val005, val006, val007, val008, val009
        // with multi-flag masking on the exclude regex

        response = client().prepareSearch("idx").setTypes("high_card_type")
                .addAggregation(terms("terms")
                        .executionHint(randomExecutionHint())
                        .field(SINGLE_VALUED_FIELD_NAME)
                        .collectMode(randomFrom(SubAggCollectionMode.values())).include("val00.+").exclude("( val000 | VAL001 )#this is a comment", Pattern.CASE_INSENSITIVE | Pattern.COMMENTS))
                .execute().actionGet();

        assertSearchResponse(response);

        terms = response.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        assertThat(terms.getName(), equalTo("terms"));
        assertThat(terms.getBuckets().size(), equalTo(8));

        for (int i = 2; i < 10; i++) {
            Terms.Bucket bucket = terms.getBucketByKey("val00" + i);
            assertThat(bucket, notNullValue());
            assertThat(key(bucket), equalTo("val00" + i));
            assertThat(bucket.getDocCount(), equalTo(1l));
        }

        // exclude without include
        // we should be left with: val000, val001, val002, val003, val004, val005, val006, val007, val008, val009
        // with a "no flag" flag

        response = client().prepareSearch("idx").setTypes("high_card_type")
                .addAggregation(terms("terms")
                        .executionHint(randomExecutionHint())
                        .field(SINGLE_VALUED_FIELD_NAME)
                        .collectMode(randomFrom(SubAggCollectionMode.values())).exclude("val0[1-9]+.+", 0))
                .execute().actionGet();

        assertSearchResponse(response);

        terms = response.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        assertThat(terms.getName(), equalTo("terms"));
        assertThat(terms.getBuckets().size(), equalTo(10));

        for (int i = 0; i < 10; i++) {
            Terms.Bucket bucket = terms.getBucketByKey("val00" + i);
            assertThat(bucket, notNullValue());
            assertThat(key(bucket), equalTo("val00" + i));
            assertThat(bucket.getDocCount(), equalTo(1l));
        }
    }


    @Test
    public void singleValueField_WithExactTermFiltering() throws Exception {
        // include without exclude

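The flags passed to include and exclude above are plain java.util.regex flags combined with bitwise OR, which is why CASE_INSENSITIVE | COMMENTS lets the exclude pattern carry an inline #comment and still match regardless of case. A small standalone illustration of those two flags (the pattern and inputs are taken from the test's shape but the class itself is made up):

import java.util.regex.Pattern;

public class RegexFlagsSketch {
    public static void main(String[] args) {
        // Flags are bit masks, so they are combined with |.
        int flags = Pattern.CASE_INSENSITIVE | Pattern.COMMENTS;

        // COMMENTS ignores whitespace and allows trailing #comments inside the pattern;
        // CASE_INSENSITIVE makes VAL001 also match val001.
        Pattern exclude = Pattern.compile("( val000 | VAL001 ) # this is a comment", flags);

        System.out.println(exclude.matcher("val000").matches()); // true
        System.out.println(exclude.matcher("VAL001").matches()); // true
        System.out.println(exclude.matcher("val002").matches()); // false
    }
}
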
@@ -46,7 +46,6 @@ import org.junit.Test;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.smileBuilder;

@@ -271,6 +270,38 @@ public class TopHitsTests extends ElasticsearchIntegrationTest {
        }
    }

    @Test
    public void testBreadthFirst() throws Exception {
        // breadth_first will be ignored since we need scores
        SearchResponse response = client().prepareSearch("idx").setTypes("type")
                .addAggregation(terms("terms")
                        .executionHint(randomExecutionHint())
                        .collectMode(SubAggCollectionMode.BREADTH_FIRST)
                        .field(TERMS_AGGS_FIELD)
                        .subAggregation(topHits("hits").setSize(3))
                ).get();

        assertSearchResponse(response);

        Terms terms = response.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        assertThat(terms.getName(), equalTo("terms"));
        assertThat(terms.getBuckets().size(), equalTo(5));

        for (int i = 0; i < 5; i++) {
            Terms.Bucket bucket = terms.getBucketByKey("val" + i);
            assertThat(bucket, notNullValue());
            assertThat(key(bucket), equalTo("val" + i));
            assertThat(bucket.getDocCount(), equalTo(10l));
            TopHits topHits = bucket.getAggregations().get("hits");
            SearchHits hits = topHits.getHits();
            assertThat(hits.totalHits(), equalTo(10l));
            assertThat(hits.getHits().length, equalTo(3));

            assertThat(hits.getAt(0).sourceAsMap().size(), equalTo(4));
        }
    }

    @Test
    public void testBasics_getProperty() throws Exception {
        SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())

@@ -531,37 +562,6 @@ public class TopHitsTests extends ElasticsearchIntegrationTest {
            assertThat(e.getMessage(), containsString("Aggregator [top_tags_hits] of type [top_hits] cannot accept sub-aggregations"));
        }
    }

    @Test
    public void testFailDeferredOnlyWhenScorerIsUsed() throws Exception {
        // No track_scores or score based sort defined in top_hits agg, so don't fail:
        SearchResponse response = client().prepareSearch("idx")
                .setTypes("type")
                .addAggregation(
                        terms("terms").executionHint(randomExecutionHint()).field(TERMS_AGGS_FIELD)
                                .collectMode(SubAggCollectionMode.BREADTH_FIRST)
                                .subAggregation(topHits("hits").addSort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC))))
                .get();
        assertSearchResponse(response);

        // Score based, so fail with deferred aggs:
        try {
            client().prepareSearch("idx")
                    .setTypes("type")
                    .addAggregation(
                            terms("terms").executionHint(randomExecutionHint()).field(TERMS_AGGS_FIELD)
                                    .collectMode(SubAggCollectionMode.BREADTH_FIRST)
                                    .subAggregation(topHits("hits")))
                    .get();
            fail();
        } catch (Exception e) {
            // It is considered a parse failure if the search request asks for top_hits
            // under an aggregation with collect_mode set to breadth_first as this would
            // require the buffering of scores alongside each document ID and that is
            // a RAM cost we are not willing to pay
            assertThat(e.getMessage(), containsString("ElasticsearchIllegalStateException"));
        }
    }

    @Test
    public void testEmptyIndex() throws Exception {

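Together, the two TopHitsTests hunks above document a behaviour change: a top_hits sub-aggregation under a breadth_first terms aggregation is only accepted when it does not need scores (for example when it sorts on a field), and a score-based top_hits now fails instead of silently ignoring breadth_first. The comment in the catch block gives the reason, buffering a score per collected document; a rough, standalone arithmetic sketch of that cost (all numbers below are invented for illustration):

// Back-of-envelope illustration (assumed numbers) of the RAM cost the comment refers to:
// deferring score-based top_hits would mean buffering a score per collected document.
public class DeferredScoreCostSketch {
    public static void main(String[] args) {
        long collectedDocs = 100_000_000L;   // hypothetical number of collected documents
        long bytesPerEntry = 4 /* float score */ + 8 /* doc id + bucket ordinal, packed */;
        double gib = collectedDocs * bytesPerEntry / (1024.0 * 1024.0 * 1024.0);
        System.out.printf("~%.1f GiB just to replay scores later%n", gib); // ~1.1 GiB
    }
}
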
@@ -125,6 +125,7 @@ public class NestedAggregatorTest extends ElasticsearchSingleNodeLuceneTestCase
        searchContext.aggregations(new SearchContextAggregations(factories));
        Aggregator[] aggs = factories.createTopLevelAggregators(context);
        BucketCollector collector = BucketCollector.wrap(Arrays.asList(aggs));
        collector.preCollection();
        // A regular search always excludes nested docs, so we use NonNestedDocsFilter.INSTANCE here (otherwise MatchAllDocsQuery would be sufficient)
        // We exclude the root doc with uid type#2; this will trigger the bug if we don't reset the root doc when we process a new segment, because
        // root doc type#3 and root doc type#1 have the same segment docid

@@ -86,13 +86,14 @@ public class MockFSDirectoryService extends FsDirectoryService {
    public void beforeIndexShardClosed(ShardId sid, @Nullable IndexShard indexShard,
                                       @IndexSettings Settings indexSettings) {
        if (indexShard != null && shardId.equals(sid)) {
            logger.info("Shard state before potentially flushing is {}", indexShard.state());
            logger.info("{} shard state before potentially flushing is {}", indexShard.shardId(), indexShard.state());
            if (validCheckIndexStates.contains(indexShard.state()) && IndexMetaData.isOnSharedFilesystem(indexSettings) == false) {
                // When the internal engine closes we do a rollback, which removes uncommitted segments
                // By doing a commit flush we perform a Lucene commit, but don't clear the translog,
                // so that even in tests where we don't flush we can check the integrity of the Lucene index
logger.info("{} flushing in order to run checkindex", indexShard.shardId());
|
||||
Releasables.close(indexShard.engine().snapshotIndex()); // Keep translog for tests that rely on replaying it
|
||||
logger.info("flush finished in beforeIndexShardClosed");
|
||||
logger.info("{} flush finished in beforeIndexShardClosed", indexShard.shardId());
|
||||
canRun = true;
|
||||
}
|
||||
}
|
||||
|
|
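The comment above distinguishes a Lucene commit (segments become durable, translog kept) from the rollback performed when the internal engine closes (uncommitted segments are dropped). A standalone sketch of that commit/rollback distinction against a plain Lucene index; this assumes a modern Lucene (5+) API surface rather than the exact version used on this branch, and the class name is made up:

import java.nio.file.Files;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class CommitVsRollbackSketch {
    public static void main(String[] args) throws Exception {
        try (Directory dir = FSDirectory.open(Files.createTempDirectory("commit-vs-rollback"))) {
            IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));

            Document committed = new Document();
            committed.add(new StringField("id", "1", Field.Store.YES));
            writer.addDocument(committed);
            writer.commit();   // durable: survives a later rollback/close

            Document uncommitted = new Document();
            uncommitted.add(new StringField("id", "2", Field.Store.YES));
            writer.addDocument(uncommitted);
            writer.rollback(); // drops everything since the last commit and closes the writer

            try (DirectoryReader reader = DirectoryReader.open(dir)) {
                System.out.println(reader.numDocs()); // prints 1: only the committed document remains
            }
        }
    }
}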