[Rollup] Select best jobs then execute msearch-per-job (elastic/x-pack-elasticsearch#4152)

If there are multiple jobs that are all the "best" (e.g. they share the
best interval), we have no way of knowing which is actually the best.
Unfortunately, we cannot just filter for all of those jobs in a single
search, because their doc_counts can potentially overlap.

To solve this, we execute an msearch-per-job so that the results
stay isolated.  When rewriting the response, we iteratively
unroll and reduce the independent msearch responses into a single
"working tree".  This allows us to intervene if there are
overlapping buckets and manually choose a doc_count.
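
As a rough illustration (plain Java over a simplified map-of-buckets model,
not the real Elasticsearch internals), the merge rule applied while folding
each msearch response into the working tree looks roughly like this: a later
job's doc_count for a bucket key is only used when the working tree does not
already hold a non-zero count for that key.

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class WorkingTreeMergeSketch {
        // Fold per-job bucket doc_counts into a single working tree. If the tree
        // already has a non-zero doc_count for a key, the existing value wins so
        // overlapping buckets from different jobs are not double-counted.
        static Map<String, Long> mergeJobResponses(List<Map<String, Long>> perJobBucketCounts) {
            Map<String, Long> workingTree = new HashMap<>();
            for (Map<String, Long> jobBuckets : perJobBucketCounts) {
                for (Map.Entry<String, Long> bucket : jobBuckets.entrySet()) {
                    long existing = workingTree.getOrDefault(bucket.getKey(), 0L);
                    if (existing == 0) {
                        workingTree.put(bucket.getKey(), bucket.getValue());
                    }
                    // otherwise defer to the doc_count already in the working tree
                }
            }
            return workingTree;
        }
    }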

Jobs are selected by recursively descending through the aggregation
tree and independently pruning the list of valid job caps in each branch.
When a leaf node is reached in a branch, the remaining jobs are
sorted by "best'ness" (see the comparator in RollupJobIdentifierUtils for
the implementation) and the best job for that branch is added to a global
set of "best jobs".  Once all branches have been evaluated, the final set
is returned to the calling code.
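
A condensed sketch of that descent, using hypothetical stand-in types
(AggNode, JobCap, prune) rather than the real AggregationBuilder/RollupJobCaps
classes; the actual implementation is RollupJobIdentifierUtils in the diff below.

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;
    import java.util.Set;

    class JobSelectionSketch {
        interface AggNode { List<AggNode> subAggs(); }   // stand-in for AggregationBuilder
        interface JobCap {}                              // stand-in for RollupJobCaps

        // Drop caps that cannot satisfy this node (wrong field, interval too large, ...).
        static List<JobCap> prune(AggNode node, List<JobCap> caps) {
            return new ArrayList<>(caps);                // real pruning logic elided
        }

        // Orders caps from "best" to "worst"; see the comparator sketch further down.
        static final Comparator<JobCap> BESTNESS = (a, b) -> 0;  // placeholder

        static void findBestJobs(AggNode node, List<JobCap> caps, Set<JobCap> bestJobs) {
            List<JobCap> localCaps = prune(node, caps);          // prune independently per branch
            if (localCaps.isEmpty()) {
                throw new IllegalArgumentException("no rollup job satisfies this part of the query");
            }
            if (node.subAggs().isEmpty()) {
                localCaps.sort(BESTNESS);                        // leaf: keep the best cap
                bestJobs.add(localCaps.get(0));
            } else {
                for (AggNode sub : node.subAggs()) {             // otherwise keep descending
                    findBestJobs(sub, new ArrayList<>(localCaps), bestJobs);
                }
            }
        }
    }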

Job "best'ness" is, briefly, the job(s) that have
 - The largest compatible date interval
 - Fewer and larger interval histograms
 - Fewer terms groups

Note: the final set of "best" jobs is not guaranteed to be minimal;
there may be redundant effort because independent branches can choose
jobs that are subsets of jobs chosen by other branches.
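
For illustration only, here is a comparator over a simplified per-job summary
(assumed fields: date interval in millis, average histogram interval, number of
terms groups); the real ordering is the comparator in RollupJobIdentifierUtils,
which derives these stats directly from the job caps.

    import java.util.Comparator;

    class BestnessSketch {
        // Simplified stand-in for the stats the real comparator derives from RollupJobCaps.
        static class JobStats {
            long dateIntervalMillis;   // the job's date_histogram interval
            double avgHistoInterval;   // average interval across histogram groups (0 if none)
            long termsGroupCount;      // number of terms groups
        }

        // Smaller == better: larger date interval first, then no/larger histograms,
        // then fewer terms groups, since each tends to mean fewer rollup documents.
        static final Comparator<JobStats> BESTNESS =
                Comparator.<JobStats>comparingLong(s -> -s.dateIntervalMillis)
                        .thenComparingDouble(s -> s.avgHistoInterval == 0
                                ? Double.NEGATIVE_INFINITY : -s.avgHistoInterval)
                        .thenComparingLong(s -> s.termsGroupCount);
    }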

Related changes:
- We have to include the job's ID in the rollup doc's
hash, so that different jobs don't overwrite the same summary
document.
- Now that we iteratively reduce the agg tree, the agg framework
injects empty buckets while we're working.  In most cases this
is harmless, but for `avg` aggs the empty bucket is a SumAgg while
any unrolled versions are converted into AvgAggs, causing a class cast
exception.  To get around this, avgs are renamed to
`{source_name}.value` to prevent the conflict.
- The job filtering has been pushed up into a query-level filter, since it
applies to the entire msearch rather than to individual agg components.
- We no longer add a filter agg clause about the date_histo's interval, because 
that is handled by the job validation and pruning.
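
As a concrete illustration of the avg rename described above (hypothetical
helper names; the real logic lives in RollupRequestTranslator.translateVSLeaf
and RollupResponseTranslator.unrollMetric in the diff below):

    class AvgRenameSketch {
        // On translation an avg named "the_avg" becomes a sum named "the_avg.value"
        // (plus a companion count sum), so the empty bucket the agg framework injects
        // (a SumAgg) can never share a name with an unrolled InternalAvg.
        static String rolledAvgName(String sourceName) {
            return sourceName + ".value";             // "the_avg" -> "the_avg.value"
        }

        // On unroll, the original name is restored when the InternalAvg is rebuilt.
        static String originalAvgName(String rolledName) {
            return rolledName.replace(".value", "");  // "the_avg.value" -> "the_avg"
        }
    }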

Original commit: elastic/x-pack-elasticsearch@995be2a039
Author: Zachary Tong, 2018-03-27 10:33:59 -07:00 (committed via GitHub)
Parent: f1a948bc54
Commit: 9cc33f4e29
21 changed files with 2669 additions and 967 deletions


@ -103,6 +103,7 @@ aggregation has been used on the `temperature` field, yielding the following res
{
"took" : 102,
"timed_out" : false,
"terminated_early" : false,
"_shards" : ... ,
"hits" : {
"total" : 0,
@ -142,7 +143,7 @@ GET sensor_rollup/_rollup_search
--------------------------------------------------
// CONSOLE
// TEST[continued]
// TEST[catch:bad_request]
// TEST[catch:/illegal_argument_exception/]
[source,js]
----
@ -151,12 +152,12 @@ GET sensor_rollup/_rollup_search
"root_cause" : [
{
"type" : "illegal_argument_exception",
"reason" : "There is not a [avg] agg with name [temperature] configured in selected rollup indices, cannot translate aggregation.",
"reason" : "There is not a rollup job that has a [avg] agg with name [avg_temperature] which also satisfies all requirements of query.",
"stack_trace": ...
}
],
"type" : "illegal_argument_exception",
"reason" : "There is not a [avg] agg with name [temperature] configured in selected rollup indices, cannot translate aggregation.",
"reason" : "There is not a rollup job that has a [avg] agg with name [avg_temperature] which also satisfies all requirements of query.",
"stack_trace": ...
},
"status": 400
@ -204,6 +205,7 @@ The response to the above query will look as expected, despite spanning rollup a
{
"took" : 102,
"timed_out" : false,
"terminated_early" : false,
"_shards" : ... ,
"hits" : {
"total" : 0,


@ -20,6 +20,7 @@ public class RollupField {
public static final String TIMESTAMP = "timestamp";
public static final String FILTER = "filter";
public static final String NAME = "rollup";
public static final String AGG = "agg";
/**
* Format to the appropriate Rollup field name convention
@ -59,6 +60,15 @@ public class RollupField {
return field + "." + RollupField.COUNT_FIELD;
}
/**
* Format to the appropriate Rollup convention for agg names that
* might conflict with empty buckets. `value` is appended to agg name.
* E.g. used for averages
*/
public static String formatValueAggName(String field) {
return field + "." + RollupField.VALUE;
}
/**
* Format into the convention for computed field lookups
*/


@ -9,8 +9,13 @@ import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.xpack.core.rollup.RollupField;
import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig;
import java.io.IOException;
@ -20,7 +25,11 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.function.BiConsumer;
/**
* Represents the Rollup capabilities for a specific job on a single rollup index
*/
public class RollupJobCaps implements Writeable, ToXContentObject {
private static ParseField JOB_ID = new ParseField("job_id");
private static ParseField ROLLUP_INDEX = new ParseField("rollup_index");


@ -264,11 +264,14 @@ public class DateHistoGroupConfig implements Writeable, ToXContentFragment {
public DateHistoGroupConfig build() {
if (field == null || field.isEmpty()) {
throw new IllegalArgumentException("Parameter [" + FIELD + "] is mandatory.");
throw new IllegalArgumentException("Parameter [" + FIELD.getPreferredName() + "] is mandatory.");
}
if (timeZone == null) {
timeZone = DateTimeZone.UTC;
}
if (interval == null) {
throw new IllegalArgumentException("Parameter [" + INTERVAL.getPreferredName() + "] is mandatory.");
}
// validate interval
createRounding(interval.toString(), timeZone, INTERVAL.getPreferredName());
if (delay != null) {


@ -29,6 +29,10 @@ import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestHandler;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.persistent.PersistentTasksExecutor;
import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder;
import org.elasticsearch.threadpool.ExecutorBuilder;
import org.elasticsearch.threadpool.FixedExecutorBuilder;
import org.elasticsearch.threadpool.ThreadPool;
@ -96,6 +100,9 @@ public class Rollup extends Plugin implements ActionPlugin, PersistentTaskPlugin
public static final Set<String> HEADER_FILTERS =
new HashSet<>(Arrays.asList("es-security-runas-user", "_xpack_security_authentication"));
public static final List<String> SUPPORTED_METRICS = Arrays.asList(MaxAggregationBuilder.NAME, MinAggregationBuilder.NAME,
SumAggregationBuilder.NAME, AvgAggregationBuilder.NAME);
private final Settings settings;
private final boolean enabled;


@ -0,0 +1,333 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.rollup;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
import org.elasticsearch.xpack.core.rollup.RollupField;
import org.elasticsearch.xpack.core.rollup.action.RollupJobCaps;
import org.elasticsearch.xpack.core.rollup.job.DateHistoGroupConfig;
import org.joda.time.DateTimeZone;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* This class contains utilities to identify which jobs are the "best" for a given aggregation tree.
* It allows the caller to pass in a set of possible rollup job capabilities and get in return
* a smaller (but not guaranteed minimal) set of valid jobs that can be searched.
*/
public class RollupJobIdentifierUtils {
private static final Comparator<RollupJobCaps> COMPARATOR = RollupJobIdentifierUtils.getComparator();
/**
* Given the aggregation tree and a list of available job capabilities, this method will return a set
* of the "best" jobs that should be searched.
*
* It does this by recursively descending through the aggregation tree and independently pruning the
* list of valid job caps in each branch. When a leaf node is reached in the branch, the remaining
* jobs are sorted by "best'ness" (see {@link #getComparator()} for the implementation)
* and added to a global set of "best jobs".
*
* Once all branches have been evaluated, the final set is returned to the calling code.
*
* Job "best'ness" is, briefly, the job(s) that have
* - The larger compatible date interval
* - Fewer and larger interval histograms
* - Fewer terms groups
*
* Note: the final set of "best" jobs is not guaranteed to be minimal, there may be redundant effort
* due to independent branches choosing jobs that are subsets of other branches.
*
* @param source The source aggregation that we are trying to find jobs for
* @param jobCaps The total set of available job caps on the index/indices
* @return A set of the "best" jobs out of the total job caps
*/
public static Set<RollupJobCaps> findBestJobs(AggregationBuilder source, Set<RollupJobCaps> jobCaps) {
// TODO there is an opportunity to optimize the returned caps to find the minimal set of required caps.
// For example, one leaf may have equally good jobs [A,B], while another leaf finds only job [B] to be best.
// If job A is a subset of job B, we could simply search job B in isolation and get the same results
//
// We can't do that today, because we don't (yet) have way of determining if one job is a sub/super set of another
Set<RollupJobCaps> bestCaps = new HashSet<>();
doFindBestJobs(source, new ArrayList<>(jobCaps), bestCaps);
return bestCaps;
}
private static void doFindBestJobs(AggregationBuilder source, List<RollupJobCaps> jobCaps, Set<RollupJobCaps> bestCaps) {
if (source.getWriteableName().equals(DateHistogramAggregationBuilder.NAME)) {
checkDateHisto((DateHistogramAggregationBuilder) source, jobCaps, bestCaps);
} else if (source.getWriteableName().equals(HistogramAggregationBuilder.NAME)) {
checkHisto((HistogramAggregationBuilder) source, jobCaps, bestCaps);
} else if (Rollup.SUPPORTED_METRICS.contains(source.getWriteableName())) {
checkVSLeaf((ValuesSourceAggregationBuilder.LeafOnly) source, jobCaps, bestCaps);
} else if (source.getWriteableName().equals(TermsAggregationBuilder.NAME)) {
checkTerms((TermsAggregationBuilder)source, jobCaps, bestCaps);
} else {
throw new IllegalArgumentException("Unable to translate aggregation tree into Rollup. Aggregation ["
+ source.getName() + "] is of type [" + source.getClass().getSimpleName() + "] which is " +
"currently unsupported.");
}
}
/**
* Find the set of date_histo's with the largest granularity interval
*/
private static void checkDateHisto(DateHistogramAggregationBuilder source, List<RollupJobCaps> jobCaps,
Set<RollupJobCaps> bestCaps) {
ArrayList<RollupJobCaps> localCaps = new ArrayList<>();
for (RollupJobCaps cap : jobCaps) {
RollupJobCaps.RollupFieldCaps fieldCaps = cap.getFieldCaps().get(source.field());
if (fieldCaps != null) {
for (Map<String, Object> agg : fieldCaps.getAggs()) {
if (agg.get(RollupField.AGG).equals(DateHistogramAggregationBuilder.NAME)) {
TimeValue interval = TimeValue.parseTimeValue((String)agg.get(RollupField.INTERVAL), "date_histogram.interval");
String thisTimezone = (String)agg.get(DateHistoGroupConfig.TIME_ZONE.getPreferredName());
String sourceTimeZone = source.timeZone() == null ? DateTimeZone.UTC.toString() : source.timeZone().toString();
// Ensure we are working on the same timezone
if (thisTimezone.equalsIgnoreCase(sourceTimeZone) == false) {
continue;
}
if (source.dateHistogramInterval() != null) {
TimeValue sourceInterval = TimeValue.parseTimeValue(source.dateHistogramInterval().toString(),
"source.date_histogram.interval");
//TODO should be divisor of interval
if (interval.compareTo(sourceInterval) <= 0) {
localCaps.add(cap);
}
} else {
if (interval.getMillis() <= source.interval()) {
localCaps.add(cap);
}
}
break;
}
}
}
}
if (localCaps.isEmpty()) {
throw new IllegalArgumentException("There is not a rollup job that has a [" + source.getWriteableName() + "] agg on field [" +
source.field() + "] which also satisfies all requirements of query.");
}
// We are a leaf, save our best caps
if (source.getSubAggregations().size() == 0) {
bestCaps.add(getTopEqualCaps(localCaps));
} else {
// otherwise keep working down the tree
source.getSubAggregations().forEach(sub -> doFindBestJobs(sub, localCaps, bestCaps));
}
}
/**
* Find the set of histo's with the largest interval
*/
private static void checkHisto(HistogramAggregationBuilder source, List<RollupJobCaps> jobCaps, Set<RollupJobCaps> bestCaps) {
ArrayList<RollupJobCaps> localCaps = new ArrayList<>();
for (RollupJobCaps cap : jobCaps) {
RollupJobCaps.RollupFieldCaps fieldCaps = cap.getFieldCaps().get(source.field());
if (fieldCaps != null) {
for (Map<String, Object> agg : fieldCaps.getAggs()) {
if (agg.get(RollupField.AGG).equals(HistogramAggregationBuilder.NAME)) {
Long interval = (long)agg.get(RollupField.INTERVAL);
// TODO should be divisor of interval
if (interval <= source.interval()) {
localCaps.add(cap);
}
break;
}
}
}
}
if (localCaps.isEmpty()) {
throw new IllegalArgumentException("There is not a rollup job that has a [" + source.getWriteableName() + "] agg on field [" +
source.field() + "] which also satisfies all requirements of query.");
}
// We are a leaf, save our best caps
if (source.getSubAggregations().size() == 0) {
bestCaps.add(getTopEqualCaps(localCaps));
} else {
// otherwise keep working down the tree
source.getSubAggregations().forEach(sub -> doFindBestJobs(sub, localCaps, bestCaps));
}
}
/**
* Ensure that the terms aggregation is supported by one or more job caps. There is no notion of "best"
* caps for terms, it is either supported or not.
*/
private static void checkTerms(TermsAggregationBuilder source, List<RollupJobCaps> jobCaps, Set<RollupJobCaps> bestCaps) {
ArrayList<RollupJobCaps> localCaps = new ArrayList<>();
for (RollupJobCaps cap : jobCaps) {
RollupJobCaps.RollupFieldCaps fieldCaps = cap.getFieldCaps().get(source.field());
if (fieldCaps != null) {
for (Map<String, Object> agg : fieldCaps.getAggs()) {
if (agg.get(RollupField.AGG).equals(TermsAggregationBuilder.NAME)) {
localCaps.add(cap);
break;
}
}
}
}
if (localCaps.isEmpty()) {
throw new IllegalArgumentException("There is not a rollup job that has a [" + source.getWriteableName() + "] agg on field [" +
source.field() + "] which also satisfies all requirements of query.");
}
// We are a leaf, save our best caps
if (source.getSubAggregations().size() == 0) {
bestCaps.add(getTopEqualCaps(localCaps));
} else {
// otherwise keep working down the tree
source.getSubAggregations().forEach(sub -> doFindBestJobs(sub, localCaps, bestCaps));
}
}
/**
* Ensure that the metrics are supported by one or more job caps. There is no notion of "best"
* caps for metrics, it is either supported or not.
*/
private static void checkVSLeaf(ValuesSourceAggregationBuilder.LeafOnly source, List<RollupJobCaps> jobCaps,
Set<RollupJobCaps> bestCaps) {
ArrayList<RollupJobCaps> localCaps = new ArrayList<>();
for (RollupJobCaps cap : jobCaps) {
RollupJobCaps.RollupFieldCaps fieldCaps = cap.getFieldCaps().get(source.field());
if (fieldCaps != null) {
for (Map<String, Object> agg : fieldCaps.getAggs()) {
if (agg.get(RollupField.AGG).equals(source.getWriteableName())) {
localCaps.add(cap);
break;
}
}
}
}
if (localCaps.isEmpty()) {
throw new IllegalArgumentException("There is not a rollup job that has a [" + source.getWriteableName() + "] agg with name [" +
source.getName() + "] which also satisfies all requirements of query.");
}
// Metrics are always leaves so go ahead and add to best caps
bestCaps.add(getTopEqualCaps(localCaps));
}
private static RollupJobCaps getTopEqualCaps(List<RollupJobCaps> caps) {
assert caps.isEmpty() == false;
caps.sort(COMPARATOR);
return caps.get(0);
}
private static Comparator<RollupJobCaps> getComparator() {
return (o1, o2) -> {
if (o1 == null) {
throw new NullPointerException("RollupJobCap [o1] cannot be null");
}
if (o2 == null) {
throw new NullPointerException("RollupJobCap [o2] cannot be null");
}
if (o1.equals(o2)) {
return 0;
}
TimeValue thisTime = null;
TimeValue thatTime = null;
// histogram intervals are averaged and compared, with the idea that
// a larger average == better, because it will generate fewer documents
float thisHistoWeights = 0;
float thatHistoWeights = 0;
long counter = 0;
// Similarly, fewer terms groups will generate fewer documents, so
// we count the number of terms groups
long thisTermsWeights = 0;
long thatTermsWeights = 0;
// Iterate over the first Caps and collect the various stats
for (RollupJobCaps.RollupFieldCaps fieldCaps : o1.getFieldCaps().values()) {
for (Map<String, Object> agg : fieldCaps.getAggs()) {
if (agg.get(RollupField.AGG).equals(DateHistogramAggregationBuilder.NAME)) {
thisTime = TimeValue.parseTimeValue((String) agg.get(RollupField.INTERVAL), RollupField.INTERVAL);
} else if (agg.get(RollupField.AGG).equals(HistogramAggregationBuilder.NAME)) {
thisHistoWeights += (long) agg.get(RollupField.INTERVAL);
counter += 1;
} else if (agg.get(RollupField.AGG).equals(TermsAggregationBuilder.NAME)) {
thisTermsWeights += 1;
}
}
}
thisHistoWeights = counter == 0 ? 0 : thisHistoWeights / counter;
// Iterate over the second Cap and collect the same stats
counter = 0;
for (RollupJobCaps.RollupFieldCaps fieldCaps : o2.getFieldCaps().values()) {
for (Map<String, Object> agg : fieldCaps.getAggs()) {
if (agg.get(RollupField.AGG).equals(DateHistogramAggregationBuilder.NAME)) {
thatTime = TimeValue.parseTimeValue((String) agg.get(RollupField.INTERVAL), RollupField.INTERVAL);
} else if (agg.get(RollupField.AGG).equals(HistogramAggregationBuilder.NAME)) {
thatHistoWeights += (long) agg.get(RollupField.INTERVAL);
counter += 1;
} else if (agg.get(RollupField.AGG).equals(TermsAggregationBuilder.NAME)) {
thatTermsWeights += 1;
}
}
}
thatHistoWeights = counter == 0 ? 0 : thatHistoWeights / counter;
// DateHistos are mandatory so these should always be present no matter what
assert thisTime != null;
assert thatTime != null;
// Compare on date interval first
// The "smaller" job is the one with the larger interval
int timeCompare = thisTime.compareTo(thatTime);
if (timeCompare != 0) {
return -timeCompare;
}
// If dates are the same, the "smaller" job is the one with a larger average histo weight.
// Not bullet proof, but heuristically we prefer:
// - one job with interval 100 (avg 100) over one job with interval 10 (avg 10)
// - one job with interval 100 (avg 100) over one job with ten histos @ interval 10 (avg 10)
// because in both cases the larger intervals likely generate fewer documents
//
// The exception is if one of jobs had no histo (avg 0) then we prefer that
int histoCompare = Float.compare(thisHistoWeights, thatHistoWeights);
if (histoCompare != 0) {
if (thisHistoWeights == 0) {
return -1;
} else if (thatHistoWeights == 0) {
return 1;
}
return -histoCompare;
}
// If dates and histo are same, the "smaller" job is the one with fewer terms aggs since
// hopefully will generate fewer docs
return Long.compare(thisTermsWeights, thatTermsWeights);
// Ignoring metrics for now, since the "best job" resolution doesn't take those into account
// and we rely on the msearch functionality to merge away any duplicates
// Could potentially optimize there in the future to choose jobs with more metric
// coverage
};
}
}


@ -10,7 +10,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.search.aggregations.AggregationBuilder;
@ -21,13 +20,12 @@ import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
import org.elasticsearch.xpack.core.rollup.RollupField;
import org.elasticsearch.xpack.core.rollup.action.RollupJobCaps;
import org.elasticsearch.xpack.core.rollup.job.DateHistoGroupConfig;
import org.joda.time.DateTimeZone;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.function.Supplier;
@ -52,7 +50,7 @@ import java.util.function.Supplier;
* }</pre>
*
*
* The only publicly "consumable" API is {@link #translateAggregation(AggregationBuilder, List, NamedWriteableRegistry, List)}.
* The only publicly "consumable" API is {@link #translateAggregation(AggregationBuilder, List, NamedWriteableRegistry)}.
*/
public class RollupRequestTranslator {
@ -133,22 +131,18 @@ public class RollupRequestTranslator {
*/
public static List<AggregationBuilder> translateAggregation(AggregationBuilder source,
List<QueryBuilder> filterConditions,
NamedWriteableRegistry registry,
List<RollupJobCaps> jobCaps) {
NamedWriteableRegistry registry) {
if (source.getWriteableName().equals(DateHistogramAggregationBuilder.NAME)) {
validateAgg(((DateHistogramAggregationBuilder)source).field(), jobCaps, source.getWriteableName());
return translateDateHistogram((DateHistogramAggregationBuilder) source, filterConditions, registry, jobCaps);
return translateDateHistogram((DateHistogramAggregationBuilder) source, filterConditions, registry);
} else if (source.getWriteableName().equals(HistogramAggregationBuilder.NAME)) {
validateAgg(((HistogramAggregationBuilder)source).field(), jobCaps, source.getWriteableName());
return translateHistogram((HistogramAggregationBuilder) source, filterConditions, registry, jobCaps);
} else if (source instanceof ValuesSourceAggregationBuilder.LeafOnly) {
validateAgg(((ValuesSourceAggregationBuilder.LeafOnly)source).field(), jobCaps, source.getWriteableName());
return translateHistogram((HistogramAggregationBuilder) source, filterConditions, registry);
} else if (Rollup.SUPPORTED_METRICS.contains(source.getWriteableName())) {
return translateVSLeaf((ValuesSourceAggregationBuilder.LeafOnly)source, registry);
} else if (source.getWriteableName().equals(TermsAggregationBuilder.NAME)) {
validateAgg(((TermsAggregationBuilder)source).field(), jobCaps, source.getWriteableName());
return translateTerms((TermsAggregationBuilder)source, filterConditions, registry, jobCaps);
return translateTerms((TermsAggregationBuilder)source, filterConditions, registry);
} else {
throw new RuntimeException("Unable to translate aggregation tree into Rollup. Aggregation ["
throw new IllegalArgumentException("Unable to translate aggregation tree into Rollup. Aggregation ["
+ source.getName() + "] is of type [" + source.getClass().getSimpleName() + "] which is " +
"currently unsupported.");
}
@ -227,10 +221,9 @@ public class RollupRequestTranslator {
*/
private static List<AggregationBuilder> translateDateHistogram(DateHistogramAggregationBuilder source,
List<QueryBuilder> filterConditions,
NamedWriteableRegistry registry,
List<RollupJobCaps> jobCaps) {
NamedWriteableRegistry registry) {
return translateVSAggBuilder(source, filterConditions, registry, jobCaps, () -> {
return translateVSAggBuilder(source, filterConditions, registry, () -> {
DateHistogramAggregationBuilder rolledDateHisto
= new DateHistogramAggregationBuilder(source.getName());
@ -240,46 +233,9 @@ public class RollupRequestTranslator {
rolledDateHisto.interval(source.interval());
}
TimeValue bestInterval = null;
String bestJob = null;
String bestTZ = null;
for (RollupJobCaps cap : jobCaps) {
RollupJobCaps.RollupFieldCaps fieldCaps = cap.getFieldCaps().get(source.field());
if (fieldCaps != null) {
for (Map<String, Object> agg : fieldCaps.getAggs()) {
if (agg.get("agg").equals(DateHistogramAggregationBuilder.NAME)) {
TimeValue interval = TimeValue.parseTimeValue((String)agg.get(RollupField.INTERVAL), "date_histogram.interval");
if (bestInterval == null || interval.compareTo(bestInterval) < 0) {
bestInterval = interval;
bestJob = cap.getJobID();
bestTZ = (String)agg.get(DateHistoGroupConfig.TIME_ZONE.getPreferredName());
}
break;
}
}
}
}
// Even though rollups only use TimeValue, the user can still pass millis as the interval in a query, so we
// need to check to see what we're dealing with here.
if (source.dateHistogramInterval() != null) {
TimeValue sourceInterval = TimeValue.parseTimeValue(source.dateHistogramInterval().toString(),
"source.date_histogram.interval");
if (bestInterval == null || bestInterval.compareTo(sourceInterval) > 0) {
throw new IllegalArgumentException("Could not find a rolled date_histogram configuration that satisfies the interval ["
+ source.dateHistogramInterval() + "]");
}
} else {
if (bestInterval == null || bestInterval.getMillis() > source.interval()) {
throw new IllegalArgumentException("Could not find a rolled date_histogram configuration that satisfies the interval ["
+ source.interval() + "]");
}
}
filterConditions.add(new TermQueryBuilder(RollupField.formatFieldName(source, RollupField.INTERVAL), bestInterval.toString()));
String timezone = source.timeZone() == null ? DateTimeZone.UTC.toString() : source.timeZone().toString();
filterConditions.add(new TermQueryBuilder(RollupField.formatFieldName(source,
DateHistoGroupConfig.TIME_ZONE.getPreferredName()), bestTZ));
filterConditions.add(new TermQueryBuilder(RollupField.formatMetaField(RollupField.ID.getPreferredName()), bestJob));
DateHistoGroupConfig.TIME_ZONE.getPreferredName()), timezone));
rolledDateHisto.offset(source.offset());
if (source.extendedBounds() != null) {
@ -299,43 +255,17 @@ public class RollupRequestTranslator {
* Notably, it adds a Sum metric to calculate the doc_count in each bucket.
*
* Conventions are identical to a date_histogram (excepting date-specific details), so see
* {@link #translateDateHistogram(DateHistogramAggregationBuilder, List, NamedWriteableRegistry, List)} for
* {@link #translateDateHistogram(DateHistogramAggregationBuilder, List, NamedWriteableRegistry)} for
* a complete list of conventions, examples, etc
*/
private static List<AggregationBuilder> translateHistogram(HistogramAggregationBuilder source,
List<QueryBuilder> filterConditions,
NamedWriteableRegistry registry,
List<RollupJobCaps> jobCaps) {
NamedWriteableRegistry registry) {
return translateVSAggBuilder(source, filterConditions, registry, jobCaps, () -> {
return translateVSAggBuilder(source, filterConditions, registry, () -> {
HistogramAggregationBuilder rolledHisto
= new HistogramAggregationBuilder(source.getName());
long bestInterval = Long.MAX_VALUE;
String bestJob = null;
for (RollupJobCaps cap : jobCaps) {
RollupJobCaps.RollupFieldCaps fieldCaps = cap.getFieldCaps().get(source.field());
if (fieldCaps != null) {
for (Map<String, Object> agg : fieldCaps.getAggs()) {
if (agg.get("agg").equals(HistogramAggregationBuilder.NAME)) {
if ((long)agg.get(RollupField.INTERVAL) < bestInterval) {
bestInterval = (long)agg.get(RollupField.INTERVAL);
bestJob = cap.getJobID();
}
break;
}
}
}
}
if (bestInterval == Long.MAX_VALUE || bestInterval > source.interval()) {
throw new IllegalArgumentException("Could not find a rolled histogram configuration that satisfies the interval ["
+ source.interval() + "]");
}
filterConditions.add(new TermQueryBuilder(RollupField.formatFieldName(source, RollupField.INTERVAL), bestInterval));
filterConditions.add(new TermQueryBuilder(RollupField.formatMetaField(RollupField.ID.getPreferredName()), bestJob));
rolledHisto.interval(source.interval());
rolledHisto.offset(source.offset());
if (Double.isFinite(source.minBound()) && Double.isFinite(source.maxBound())) {
@ -414,10 +344,9 @@ public class RollupRequestTranslator {
*/
private static List<AggregationBuilder> translateTerms(TermsAggregationBuilder source,
List<QueryBuilder> filterConditions,
NamedWriteableRegistry registry,
List<RollupJobCaps> jobCaps) {
NamedWriteableRegistry registry) {
return translateVSAggBuilder(source, filterConditions, registry, jobCaps, () -> {
return translateVSAggBuilder(source, filterConditions, registry, () -> {
TermsAggregationBuilder rolledTerms
= new TermsAggregationBuilder(source.getName(), source.valueType());
rolledTerms.field(RollupField.formatFieldName(source, RollupField.VALUE));
@ -435,7 +364,7 @@ public class RollupRequestTranslator {
rolledTerms.shardSize(source.shardSize());
}
rolledTerms.showTermDocCountError(source.showTermDocCountError());
//rolledTerms.size(termsAgg.size()); // TODO fix in core
rolledTerms.size(source.size());
return rolledTerms;
});
}
@ -458,7 +387,7 @@ public class RollupRequestTranslator {
*/
private static <T extends ValuesSourceAggregationBuilder> List<AggregationBuilder>
translateVSAggBuilder(ValuesSourceAggregationBuilder source, List<QueryBuilder> filterConditions,
NamedWriteableRegistry registry, List<RollupJobCaps> jobCaps, Supplier<T> factory) {
NamedWriteableRegistry registry, Supplier<T> factory) {
T rolled = factory.get();
@ -470,7 +399,7 @@ public class RollupRequestTranslator {
// Translate all subaggs and add to the newly translated agg
// NOTE: using for loop instead of stream because compiler explodes with a bug :/
for (AggregationBuilder subAgg : source.getSubAggregations()) {
List<AggregationBuilder> translated = translateAggregation(subAgg, filterConditions, registry, jobCaps);
List<AggregationBuilder> translated = translateAggregation(subAgg, filterConditions, registry);
for (AggregationBuilder t : translated) {
rolled.subAggregation(t);
}
@ -505,7 +434,8 @@ public class RollupRequestTranslator {
*
* However, for `avg` metrics (and potentially others in the future), the agg is translated into
* a sum + sum aggs; one for count and one for sum. When unrolling these will be combined back into
* a single avg. E.g. for an `avg` agg:
* a single avg. Note that we also have to rename the avg agg name to distinguish it from empty
* buckets. E.g. for an `avg` agg:
*
* <pre>{@code
* {
@ -518,7 +448,7 @@ public class RollupRequestTranslator {
* <pre>{@code
* [
* {
* "the_avg": {
* "the_avg.value": {
* "sum" : { "field" : "some_field.avg.value" }}
* },
* {
@ -545,6 +475,7 @@ public class RollupRequestTranslator {
* IF the agg is an AvgAgg, the following additional conventions are added:
* <ul>
* <li>Agg type: becomes SumAgg, instead of AvgAgg</li>
* <li>Named: {source name}.value</li>
* <li>Additionally, an extra SumAgg is added:</li>
* <li>
* <ul>
@ -569,9 +500,10 @@ public class RollupRequestTranslator {
if (metric instanceof AvgAggregationBuilder) {
rolledMetrics = new ArrayList<>(2);
// Avg metric is translated into a MaxAgg, e.g.
// "the_avg" : { "field" : "some_field.avg.value" }}
SumAggregationBuilder value = new SumAggregationBuilder(metric.getName());
// Avg metric is translated into a SumAgg, e.g.
// Note: we change the agg name to prevent conflicts with empty buckets
// "the_avg.value" : { "field" : "some_field.avg.value" }}
SumAggregationBuilder value = new SumAggregationBuilder(RollupField.formatValueAggName(metric.getName()));
value.field(RollupField.formatFieldName(metric, RollupField.VALUE));
rolledMetrics.add(value);
@ -604,27 +536,4 @@ public class RollupRequestTranslator {
}
}
}
/**
* Validate the aggregation to ensure that a corresponding agg was configured in the
* rollup configuration for the index. Throws an exception if a matching config is not found
*
* @param field The field name of the agg we are validating
* @param jobCaps The list of rollup caps for the indices in the request
* @param targetAgg The type of aggregation that this field needs to match
*/
private static void validateAgg(String field, List<RollupJobCaps> jobCaps, String targetAgg) {
for (RollupJobCaps cap : jobCaps) {
if (cap.getFieldCaps().keySet().contains(field)) {
for (Map<String, Object> aggs : cap.getFieldCaps().get(field).getAggs()) {
if (aggs.get("agg").equals(targetAgg)) {
return;
}
}
}
}
throw new IllegalArgumentException("There is not a [" + targetAgg + "] agg with name [" + field
+ "] configured in selected rollup indices, cannot translate aggregation.");
}
}


@ -39,10 +39,14 @@ import org.elasticsearch.xpack.core.rollup.RollupField;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
@ -58,7 +62,40 @@ public class RollupResponseTranslator {
private static final Logger logger = Loggers.getLogger(RollupResponseTranslator.class);
/**
* Combines an msearch with rollup + non-rollup aggregations into a SearchResponse
* Verifies a live-only search response. Essentially just checks for failure then returns
* the response since we have no work to do
*/
public static SearchResponse verifyResponse(MultiSearchResponse.Item normalResponse) {
if (normalResponse.isFailure()) {
throw new RuntimeException(normalResponse.getFailureMessage(), normalResponse.getFailure());
}
return normalResponse.getResponse();
}
/**
* Translates a rollup-only search response back into the expected convention. Similar to
* {@link #combineResponses(MultiSearchResponse.Item[], InternalAggregation.ReduceContext)} except it only
* has to deal with the rollup response (no live response)
*
* See {@link #combineResponses(MultiSearchResponse.Item[], InternalAggregation.ReduceContext)} for more details
* on the translation conventions
*/
public static SearchResponse translateResponse(MultiSearchResponse.Item[] rolledMsearch,
InternalAggregation.ReduceContext reduceContext) {
List<SearchResponse> responses = Arrays.stream(rolledMsearch)
.map(item -> {
if (item.isFailure()) {
throw new RuntimeException(item.getFailureMessage(), item.getFailure());
}
return item.getResponse();
}).collect(Collectors.toList());
return doCombineResponse(null, responses, reduceContext);
}
/**
* Combines an msearch with rollup + live aggregations into a SearchResponse
* representing the union of the two responses. The response format is identical to
* a non-rollup search response (aka a "normal aggregation" response).
*
@ -151,110 +188,118 @@ public class RollupResponseTranslator {
* so that the final product looks like a regular aggregation response, allowing it to be
* reduced/merged into the response from the un-rolled index
*
* @param normalResponse The MultiSearch response from a non-rollup msearch
* @param msearchResponses The responses from the msearch, where the first response is the live-index response
*/
public static SearchResponse verifyResponse(MultiSearchResponse.Item normalResponse) {
if (normalResponse.isFailure()) {
throw new RuntimeException(normalResponse.getFailureMessage(), normalResponse.getFailure());
}
return normalResponse.getResponse();
}
public static SearchResponse combineResponses(MultiSearchResponse.Item[] msearchResponses,
InternalAggregation.ReduceContext reduceContext) {
boolean liveMissing = false;
assert msearchResponses.length >= 2;
public static SearchResponse translateResponse(MultiSearchResponse.Item rolledResponse,
InternalAggregation.ReduceContext reduceContext) {
if (rolledResponse.isFailure()) {
throw new RuntimeException(rolledResponse.getFailureMessage(), rolledResponse.getFailure());
}
return doCombineResponse(null, rolledResponse.getResponse(), reduceContext);
}
public static SearchResponse combineResponses(MultiSearchResponse.Item normalResponse, MultiSearchResponse.Item rolledResponse,
InternalAggregation.ReduceContext reduceContext) {
boolean normalMissing = false;
if (normalResponse.isFailure()) {
Exception e = normalResponse.getFailure();
// If we have a rollup response we can tolerate a missing normal response
// The live response is always first
MultiSearchResponse.Item liveResponse = msearchResponses[0];
if (liveResponse.isFailure()) {
Exception e = liveResponse.getFailure();
// If we have a rollup response we can tolerate a missing live response
if (e instanceof IndexNotFoundException) {
logger.warn("\"Live\" index not found during rollup search.", e);
normalMissing = true;
liveMissing = true;
} else {
throw new RuntimeException(normalResponse.getFailureMessage(), normalResponse.getFailure());
throw new RuntimeException(liveResponse.getFailureMessage(), liveResponse.getFailure());
}
}
if (rolledResponse.isFailure()) {
Exception e = rolledResponse.getFailure();
// If we have a normal response we can tolerate a missing rollup response, although it theoretically
// should be handled by a different code path (verifyResponse)
if (e instanceof IndexNotFoundException && normalMissing == false) {
logger.warn("\"Rollup\" index not found during rollup search.", e);
return verifyResponse(normalResponse);
} else {
throw new RuntimeException(rolledResponse.getFailureMessage(), rolledResponse.getFailure());
}
List<SearchResponse> rolledResponses = Arrays.stream(msearchResponses)
.skip(1)
.map(item -> {
if (item.isFailure()) {
Exception e = item.getFailure();
// If we have a normal response we can tolerate a missing rollup response, although it theoretically
// should be handled by a different code path (verifyResponse)
if (e instanceof IndexNotFoundException) {
logger.warn("Rollup index not found during rollup search.", e);
} else {
throw new RuntimeException(item.getFailureMessage(), item.getFailure());
}
return null;
} else {
return item.getResponse();
}
}).filter(Objects::nonNull).collect(Collectors.toList());
// If we only have a live index left, process it directly
if (rolledResponses.isEmpty() && liveMissing == false) {
return verifyResponse(liveResponse);
} else if (rolledResponses.isEmpty() && liveMissing) {
throw new RuntimeException("No indices (live or rollup) found during rollup search");
}
return doCombineResponse(normalResponse.getResponse(), rolledResponse.getResponse(), reduceContext);
return doCombineResponse(liveResponse.getResponse(), rolledResponses, reduceContext);
}
private static SearchResponse doCombineResponse(SearchResponse originalResponse, SearchResponse rolledResponse,
private static SearchResponse doCombineResponse(SearchResponse liveResponse, List<SearchResponse> rolledResponses,
InternalAggregation.ReduceContext reduceContext) {
final InternalAggregations originalAggs = originalResponse != null
? (InternalAggregations)originalResponse.getAggregations()
final InternalAggregations liveAggs = liveResponse != null
? (InternalAggregations)liveResponse.getAggregations()
: InternalAggregations.EMPTY;
if (rolledResponse.getAggregations() == null || rolledResponse.getAggregations().asList().size() == 0) {
logger.debug(Strings.toString(rolledResponse));
throw new RuntimeException("Expected to find aggregations in rollup response, but none found.");
}
List<InternalAggregation> unrolledAggs = rolledResponse.getAggregations().asList().stream()
.map(agg -> {
// We expect a filter agg here because the rollup convention is that all translated aggs
// will start with a filter, containing various agg-specific predicates. If there
// *isn't* a filter agg here, something has gone very wrong!
if ((agg instanceof InternalFilter) == false) {
throw new RuntimeException("Expected [" +agg.getName()
+ "] to be a FilterAggregation, but was ["
+ agg.getClass().getSimpleName() + "]");
}
return unrollAgg(((InternalFilter)agg).getAggregations(), originalAggs);
})
.flatMap(Collection::stream)
.collect(Collectors.toList());
rolledResponses.forEach(r -> {
if (r == null || r.getAggregations() == null || r.getAggregations().asList().size() == 0) {
throw new RuntimeException("Expected to find aggregations in rollup response, but none found.");
}
});
// The combination process returns a tree that is identical to the non-rolled
// which means we can use aggregation's reduce method to combine, just as if
// it was a result from another shard
List<InternalAggregations> toReduce = new ArrayList<>(2);
toReduce.add(new InternalAggregations(unrolledAggs));
if (originalAggs.asList().size() != 0) {
toReduce.add(originalAggs);
InternalAggregations currentTree = new InternalAggregations(Collections.emptyList());
for (SearchResponse rolledResponse : rolledResponses) {
List<InternalAggregation> unrolledAggs = new ArrayList<>(rolledResponse.getAggregations().asList().size());
for (Aggregation agg : rolledResponse.getAggregations()) {
// We expect a filter agg here because the rollup convention is that all translated aggs
// will start with a filter, containing various agg-specific predicates. If there
// *isn't* a filter agg here, something has gone very wrong!
if ((agg instanceof InternalFilter) == false) {
throw new RuntimeException("Expected [" +agg.getName()
+ "] to be a FilterAggregation, but was ["
+ agg.getClass().getSimpleName() + "]");
}
unrolledAggs.addAll(unrollAgg(((InternalFilter)agg).getAggregations(), liveAggs, currentTree));
}
// Iteratively merge in each new set of unrolled aggs, so that we can identify/fix overlapping doc_counts
// in the next round of unrolling
InternalAggregations finalUnrolledAggs = new InternalAggregations(unrolledAggs);
currentTree = InternalAggregations.reduce(Arrays.asList(currentTree, finalUnrolledAggs),
new InternalAggregation.ReduceContext(reduceContext.bigArrays(), reduceContext.scriptService(), true));
}
InternalAggregations finalCombinedAggs = InternalAggregations.reduce(toReduce,
new InternalAggregation.ReduceContext(reduceContext.bigArrays(), reduceContext.scriptService(), true));
// Add in the live aggregations if they exist
if (liveAggs.asList().size() != 0) {
currentTree = InternalAggregations.reduce(Arrays.asList(currentTree, liveAggs),
new InternalAggregation.ReduceContext(reduceContext.bigArrays(), reduceContext.scriptService(), true));
}
// TODO allow profiling in the future
InternalSearchResponse combinedInternal = new InternalSearchResponse(SearchHits.empty(), finalCombinedAggs,
null, null, rolledResponse.isTimedOut(),
rolledResponse.isTerminatedEarly(), rolledResponse.getNumReducePhases());
InternalSearchResponse combinedInternal = new InternalSearchResponse(SearchHits.empty(), currentTree, null, null,
rolledResponses.stream().anyMatch(SearchResponse::isTimedOut),
rolledResponses.stream().anyMatch(SearchResponse::isTimedOut),
rolledResponses.stream().mapToInt(SearchResponse::getNumReducePhases).sum());
int totalShards = rolledResponse.getTotalShards();
int sucessfulShards = rolledResponse.getSuccessfulShards();
int skippedShards = rolledResponse.getSkippedShards();
long took = rolledResponse.getTook().getMillis();
int totalShards = rolledResponses.stream().mapToInt(SearchResponse::getTotalShards).sum();
int sucessfulShards = rolledResponses.stream().mapToInt(SearchResponse::getSuccessfulShards).sum();
int skippedShards = rolledResponses.stream().mapToInt(SearchResponse::getSkippedShards).sum();
long took = rolledResponses.stream().mapToLong(r -> r.getTook().getMillis()).sum() ;
if (originalResponse != null) {
totalShards += originalResponse.getTotalShards();
sucessfulShards += originalResponse.getSuccessfulShards();
skippedShards += originalResponse.getSkippedShards();
took = Math.max(took, originalResponse.getTook().getMillis());
if (liveResponse != null) {
totalShards += liveResponse.getTotalShards();
sucessfulShards += liveResponse.getSuccessfulShards();
skippedShards += liveResponse.getSkippedShards();
took = Math.max(took, liveResponse.getTook().getMillis());
}
// Shard failures are ignored atm, so returning an empty array is fine
return new SearchResponse(combinedInternal, null, totalShards, sucessfulShards, skippedShards,
took, ShardSearchFailure.EMPTY_ARRAY, rolledResponse.getClusters());
took, ShardSearchFailure.EMPTY_ARRAY, rolledResponses.get(0).getClusters());
}
/**
@ -266,7 +311,8 @@ public class RollupResponseTranslator {
*
* @return An unrolled aggregation that mimics the structure of `base`, allowing reduction
*/
private static List<InternalAggregation> unrollAgg(InternalAggregations rolled, InternalAggregations original) {
private static List<InternalAggregation> unrollAgg(InternalAggregations rolled, InternalAggregations original,
InternalAggregations currentTree) {
return rolled.asList().stream()
.filter(subAgg -> !subAgg.getName().endsWith("." + RollupField.COUNT_FIELD))
.map(agg -> {
@ -283,7 +329,7 @@ public class RollupResponseTranslator {
count = getAggCount(agg, rolled.getAsMap());
}
return unrollAgg((InternalAggregation)agg, original.get(agg.getName()), count);
return unrollAgg((InternalAggregation)agg, original.get(agg.getName()), currentTree.get(agg.getName()), count);
}).collect(Collectors.toList());
}
@ -296,10 +342,12 @@ public class RollupResponseTranslator {
*
* @return An unrolled aggregation that mimics the structure of base, allowing reduction
*/
protected static InternalAggregation unrollAgg(InternalAggregation rolled, InternalAggregation originalAgg, long count) {
protected static InternalAggregation unrollAgg(InternalAggregation rolled, InternalAggregation originalAgg,
InternalAggregation currentTree, long count) {
if (rolled instanceof InternalMultiBucketAggregation) {
return unrollMultiBucket((InternalMultiBucketAggregation) rolled, (InternalMultiBucketAggregation) originalAgg);
return unrollMultiBucket((InternalMultiBucketAggregation) rolled, (InternalMultiBucketAggregation) originalAgg,
(InternalMultiBucketAggregation) currentTree);
} else if (rolled instanceof SingleValue) {
return unrollMetric((SingleValue) rolled, count);
} else {
@ -314,34 +362,39 @@ public class RollupResponseTranslator {
* called by other internal methods in this class, rather than directly calling the per-type methods.
*/
@SuppressWarnings("unchecked")
private static InternalAggregation unrollMultiBucket(InternalMultiBucketAggregation rolled, InternalMultiBucketAggregation original) {
private static InternalAggregation unrollMultiBucket(InternalMultiBucketAggregation rolled, InternalMultiBucketAggregation original,
InternalMultiBucketAggregation currentTree) {
// The only thing unique between all the multibucket agg is the type of bucket they
// need, so this if/else simply creates specialized closures that return the appropriate
// bucket type. Otherwise the heavy-lifting is in
// {@link #unrollMultiBucket(InternalMultiBucketAggregation, InternalMultiBucketAggregation, TriFunction)}
if (rolled instanceof InternalDateHistogram) {
return unrollMultiBucket(rolled, original, (bucket, bucketCount, subAggs) -> {
return unrollMultiBucket(rolled, original, currentTree, (bucket, bucketCount, subAggs) -> {
long key = ((InternalDateHistogram) rolled).getKey(bucket).longValue();
DocValueFormat formatter = ((InternalDateHistogram.Bucket)bucket).getFormatter();
assert bucketCount >= 0;
return new InternalDateHistogram.Bucket(key, bucketCount,
((InternalDateHistogram.Bucket) bucket).getKeyed(), formatter, subAggs);
});
} else if (rolled instanceof InternalHistogram) {
return unrollMultiBucket(rolled, original, (bucket, bucketCount, subAggs) -> {
return unrollMultiBucket(rolled, original, currentTree, (bucket, bucketCount, subAggs) -> {
long key = ((InternalHistogram) rolled).getKey(bucket).longValue();
DocValueFormat formatter = ((InternalHistogram.Bucket)bucket).getFormatter();
assert bucketCount >= 0;
return new InternalHistogram.Bucket(key, bucketCount, ((InternalHistogram.Bucket) bucket).getKeyed(), formatter, subAggs);
});
} else if (rolled instanceof StringTerms) {
return unrollMultiBucket(rolled, original, (bucket, bucketCount, subAggs) -> {
return unrollMultiBucket(rolled, original, currentTree, (bucket, bucketCount, subAggs) -> {
BytesRef key = new BytesRef(bucket.getKeyAsString().getBytes(StandardCharsets.UTF_8));
assert bucketCount >= 0;
//TODO expose getFormatter(), keyed upstream in Core
return new StringTerms.Bucket(key, bucketCount, subAggs, false, 0, DocValueFormat.RAW);
});
} else if (rolled instanceof LongTerms) {
return unrollMultiBucket(rolled, original, (bucket, bucketCount, subAggs) -> {
return unrollMultiBucket(rolled, original, currentTree, (bucket, bucketCount, subAggs) -> {
long key = (long)bucket.getKey();
assert bucketCount >= 0;
//TODO expose getFormatter(), keyed upstream in Core
return new LongTerms.Bucket(key, bucketCount, subAggs, false, 0, DocValueFormat.RAW);
});
@ -363,28 +416,49 @@ public class RollupResponseTranslator {
private static <A extends InternalMultiBucketAggregation,
B extends InternalBucket,
T extends InternalMultiBucketAggregation<A, B>>
InternalAggregation unrollMultiBucket(T source, T original,
InternalAggregation unrollMultiBucket(T source, T original, T currentTree,
TriFunction<InternalBucket, Long, InternalAggregations, B> bucketFactory) {
Set<Object> keys = new HashSet<>();
Map<Object, InternalBucket> originalKeys = new HashMap<>();
Map<Object, InternalBucket> currentKeys = new HashMap<>();
if (original != null) {
original.getBuckets().forEach(b -> keys.add(b.getKey()));
original.getBuckets().forEach(b -> originalKeys.put(b.getKey(), b));
}
if (currentTree != null) {
currentTree.getBuckets().forEach(b -> currentKeys.put(b.getKey(), b));
}
// Iterate over the buckets in the multibucket
List<B> buckets = source.getBuckets()
.stream()
.filter(b -> keys.contains(b.getKey()) == false) // If the original has this key, ignore the rolled version
.filter(b -> originalKeys.containsKey(b.getKey()) == false) // If the original has this key, ignore the rolled version
.map(bucket -> {
// Grab the value from the count agg (if it exists), which represents this bucket's doc_count
long bucketCount = getAggCount(source, bucket.getAggregations().getAsMap());
// Don't generate buckets if the doc count is zero
if (bucketCount == 0) {
return null;
}
// current, partially merged tree contains this key. Defer to the existing doc_count if it is non-zero
if (currentKeys.containsKey(bucket.getKey()) && currentKeys.get(bucket.getKey()).getDocCount() != 0) {
// Unlike above where we return null if doc_count is zero, we return a doc_count: 0 bucket
// here because it may have sub-aggs that need merging, whereas above the bucket was just empty/null
bucketCount = 0;
}
// Then iterate over the subAggs in the bucket
InternalAggregations subAggs = unrollSubAggsFromMulti(bucket);
InternalAggregations subAggs = unrollSubAggsFromMulti(bucket, originalKeys.get(bucket.getKey()),
currentKeys.get(bucket.getKey()));
return bucketFactory.apply(bucket, bucketCount, subAggs);
}).collect(Collectors.toList());
})
.filter(Objects::nonNull)
.collect(Collectors.toList());
return source.create(buckets);
}
@ -393,7 +467,7 @@ public class RollupResponseTranslator {
*
* @param bucket The current bucket that we wish to unroll
*/
private static InternalAggregations unrollSubAggsFromMulti(InternalBucket bucket) {
private static InternalAggregations unrollSubAggsFromMulti(InternalBucket bucket, InternalBucket original, InternalBucket currentTree) {
// Iterate over the subAggs in each bucket
return new InternalAggregations(bucket.getAggregations()
.asList().stream()
@ -404,7 +478,17 @@ public class RollupResponseTranslator {
long count = getAggCount(subAgg, bucket.getAggregations().asMap());
return unrollAgg((InternalAggregation) subAgg, null, count);
InternalAggregation originalSubAgg = null;
if (original != null && original.getAggregations() != null) {
originalSubAgg = original.getAggregations().get(subAgg.getName());
}
InternalAggregation currentSubAgg = null;
if (currentTree != null && currentTree.getAggregations() != null) {
currentSubAgg = currentTree.getAggregations().get(subAgg.getName());
}
return unrollAgg((InternalAggregation) subAgg, originalSubAgg, currentSubAgg, count);
}).collect(Collectors.toList()));
}
@ -419,7 +503,8 @@ public class RollupResponseTranslator {
} else if (metric instanceof InternalSum) {
// If count is anything other than -1, this sum is actually an avg
if (count != -1) {
return new InternalAvg(metric.getName(), metric.value(), count, DocValueFormat.RAW,
// Note: Avgs have a slightly different name to prevent collision with empty bucket defaults
return new InternalAvg(metric.getName().replace("." + RollupField.VALUE, ""), metric.value(), count, DocValueFormat.RAW,
metric.pipelineAggregators(), metric.getMetaData());
}
return metric;
@ -435,14 +520,16 @@ public class RollupResponseTranslator {
if (agg.getType().equals(DateHistogramAggregationBuilder.NAME)
|| agg.getType().equals(HistogramAggregationBuilder.NAME)
|| agg.getType().equals(StringTerms.NAME) || agg.getType().equals(LongTerms.NAME)
|| agg.getType().equals(SumAggregationBuilder.NAME)) {
|| agg.getType().equals(StringTerms.NAME) || agg.getType().equals(LongTerms.NAME)) {
countPath = RollupField.formatCountAggName(agg.getName());
} else if (agg.getType().equals(SumAggregationBuilder.NAME)) {
// Note: Avgs have a slightly different name to prevent collision with empty bucket defaults
countPath = RollupField.formatCountAggName(agg.getName().replace("." + RollupField.VALUE, ""));
}
if (countPath != null && aggMap.get(countPath) != null) {
// we always set the count fields to Sum aggs, so this is safe
assert (aggMap.get(countPath) instanceof InternalSum);
assert aggMap.get(countPath) instanceof InternalSum;
return (long)((InternalSum) aggMap.get(countPath)).getValue();
}


@ -7,6 +7,7 @@ package org.elasticsearch.xpack.rollup.action;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.MultiSearchRequest;
import org.elasticsearch.action.search.MultiSearchResponse;
@ -22,7 +23,11 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
@ -48,17 +53,22 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.core.rollup.RollupField;
import org.elasticsearch.xpack.core.rollup.action.RollupJobCaps;
import org.elasticsearch.xpack.core.rollup.action.RollupSearchAction;
import org.elasticsearch.xpack.rollup.Rollup;
import org.elasticsearch.xpack.core.rollup.RollupField;
import org.elasticsearch.xpack.rollup.RollupJobIdentifierUtils;
import org.elasticsearch.xpack.rollup.RollupRequestTranslator;
import org.elasticsearch.xpack.rollup.RollupResponseTranslator;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
public class TransportRollupSearchAction extends TransportAction<SearchRequest, SearchResponse> {
@ -88,73 +98,73 @@ public class TransportRollupSearchAction extends TransportAction<SearchRequest,
@Override
protected void doExecute(SearchRequest request, ActionListener<SearchResponse> listener) {
Triple<String[], String[], List<RollupJobCaps>> indices = separateIndices(request.indices(),
RollupSearchContext rollupSearchContext = separateIndices(request.indices(),
clusterService.state().getMetaData().indices());
MultiSearchRequest msearch = createMSearchRequest(request, registry, indices.v1(), indices.v2(), indices.v3());
MultiSearchRequest msearch = createMSearchRequest(request, registry, rollupSearchContext);
client.multiSearch(msearch, ActionListener.wrap(msearchResponse -> {
InternalAggregation.ReduceContext context
= new InternalAggregation.ReduceContext(bigArrays, scriptService, false);
listener.onResponse(processResponses(indices, msearchResponse, context));
listener.onResponse(processResponses(rollupSearchContext, msearchResponse, context));
}, listener::onFailure));
}
static SearchResponse processResponses(Triple<String[], String[], List<RollupJobCaps>> indices, MultiSearchResponse msearchResponse,
InternalAggregation.ReduceContext context) {
if (indices.v1.length > 0 && indices.v2.length > 0) {
static SearchResponse processResponses(RollupSearchContext rollupContext, MultiSearchResponse msearchResponse,
InternalAggregation.ReduceContext reduceContext) {
if (rollupContext.hasLiveIndices() && rollupContext.hasRollupIndices()) {
// Both
assert(msearchResponse.getResponses().length == 2);
return RollupResponseTranslator.combineResponses(msearchResponse.getResponses()[0], msearchResponse.getResponses()[1], context);
} else if (indices.v1.length > 0) {
return RollupResponseTranslator.combineResponses(msearchResponse.getResponses(), reduceContext);
} else if (rollupContext.hasLiveIndices()) {
// Only live
assert(msearchResponse.getResponses().length == 1);
assert msearchResponse.getResponses().length == 1;
return RollupResponseTranslator.verifyResponse(msearchResponse.getResponses()[0]);
} else {
} else if (rollupContext.hasRollupIndices()) {
// Only rollup
assert(msearchResponse.getResponses().length == 1);
return RollupResponseTranslator.translateResponse(msearchResponse.getResponses()[0], context);
return RollupResponseTranslator.translateResponse(msearchResponse.getResponses(), reduceContext);
}
throw new RuntimeException("MSearch response was empty, cannot unroll RollupSearch results");
}
static MultiSearchRequest createMSearchRequest(SearchRequest request, NamedWriteableRegistry registry,
String[] normalIndices, String[] rollupIndices,
List<RollupJobCaps> jobCaps) {
static MultiSearchRequest createMSearchRequest(SearchRequest request, NamedWriteableRegistry registry, RollupSearchContext context) {
if (normalIndices.length == 0 && rollupIndices.length == 0) {
if (context.hasLiveIndices() == false && context.hasRollupIndices() == false) {
// Don't support _all on everything right now, for code simplicity
throw new IllegalArgumentException("Must specify at least one rollup index in _rollup_search API");
} else if (rollupIndices.length == 0) {
} else if (context.hasLiveIndices() && context.hasRollupIndices() == false) {
// not best practice, but if the user accidentally only sends "normal" indices we can support that
logger.debug("Creating msearch with only normal request");
final SearchRequest originalRequest = new SearchRequest(normalIndices, request.source());
final SearchRequest originalRequest = new SearchRequest(context.getLiveIndices(), request.source());
return new MultiSearchRequest().add(originalRequest);
}
// Rollup only supports a limited subset of the search API, validate and make sure
// nothing is set that we can't support
validateSearchRequest(request);
QueryBuilder rewritten = rewriteQuery(request.source().query(), jobCaps);
// The original request is added as-is (if normal indices exist), minus the rollup indices
final SearchRequest originalRequest = new SearchRequest(normalIndices, request.source());
final SearchRequest originalRequest = new SearchRequest(context.getLiveIndices(), request.source());
MultiSearchRequest msearch = new MultiSearchRequest();
if (normalIndices.length != 0) {
if (context.hasLiveIndices()) {
msearch.add(originalRequest);
}
SearchSourceBuilder rolledSearchSource = new SearchSourceBuilder();
rolledSearchSource.query(rewritten);
rolledSearchSource.size(0);
AggregatorFactories.Builder sourceAgg = request.source().aggregations();
// Find our list of "best" job caps
Set<RollupJobCaps> validatedCaps = new HashSet<>();
sourceAgg.getAggregatorFactories()
.forEach(agg -> validatedCaps.addAll(RollupJobIdentifierUtils.findBestJobs(agg, context.getJobCaps())));
List<String> jobIds = validatedCaps.stream().map(RollupJobCaps::getJobID).collect(Collectors.toList());
for (AggregationBuilder agg : sourceAgg.getAggregatorFactories()) {
List<QueryBuilder> filterConditions = new ArrayList<>(5);
filterConditions.addAll(mandatoryFilterConditions());
// Translate the agg tree, and collect any potential filtering clauses
List<AggregationBuilder> translatedAgg = RollupRequestTranslator.translateAggregation(agg, filterConditions, registry, jobCaps);
List<AggregationBuilder> translatedAgg = RollupRequestTranslator.translateAggregation(agg, filterConditions, registry);
BoolQueryBuilder boolQuery = new BoolQueryBuilder();
filterConditions.forEach(boolQuery::must);
@ -163,15 +173,51 @@ public class TransportRollupSearchAction extends TransportAction<SearchRequest,
translatedAgg.forEach(filterAgg::subAggregation);
rolledSearchSource.aggregation(filterAgg);
}
SearchRequest rolledSearch = new SearchRequest(rollupIndices, rolledSearchSource)
.types(request.types());
msearch.add(rolledSearch);
logger.debug("Original query\n" + request.toString());
logger.debug("Translated rollup query:\n" + rolledSearch.toString());
// Rewrite the user's query to our internal conventions, checking against the validated job caps
QueryBuilder rewritten = rewriteQuery(request.source().query(), validatedCaps);
for (String id : jobIds) {
SearchSourceBuilder copiedSource;
try {
copiedSource = copyWriteable(rolledSearchSource, registry, SearchSourceBuilder::new);
} catch (IOException e) {
throw new RuntimeException("Encountered IO exception while trying to build rollup request.", e);
}
// filter the rewritten query by JobID
copiedSource.query(new BoolQueryBuilder()
.must(rewritten)
.filter(new TermQueryBuilder(RollupField.formatMetaField(RollupField.ID.getPreferredName()), id))
.filter(new TermQueryBuilder(RollupField.formatMetaField(RollupField.VERSION_FIELD), Rollup.ROLLUP_VERSION)));
// And add a new msearch per JobID
msearch.add(new SearchRequest(context.getRollupIndices(), copiedSource).types(request.types()));
}
return msearch;
}
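
For reference, the per-job scoping built in the loop above amounts to wrapping the rewritten query in a `bool` with two term filters. The literal field names `_rollup.id` and `_rollup.version` below are assumed expansions of `RollupField.formatMetaField(...)`; treat this as a sketch, not the exact production query.

[source,java]
----
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.TermQueryBuilder;

class PerJobQuerySketch {
    // One msearch entry per job: the same rewritten user query, restricted to
    // documents written by that job (and by a compatible rollup version).
    static QueryBuilder scopeToJob(QueryBuilder rewritten, String jobId, int rollupVersion) {
        QueryBuilder base = rewritten == null ? new MatchAllQueryBuilder() : rewritten;
        return new BoolQueryBuilder()
                .must(base)
                .filter(new TermQueryBuilder("_rollup.id", jobId))               // assumed field name
                .filter(new TermQueryBuilder("_rollup.version", rollupVersion)); // assumed field name
    }
}
----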
/**
* Lifted from ESTestCase :s Don't reuse this anywhere!
*
* Create a copy of an original {@link SearchSourceBuilder} object by running it through a {@link BytesStreamOutput} and
* reading it in again using a {@link Writeable.Reader}. The stream that is wrapped around the {@link StreamInput}
* potentially needs to use a {@link NamedWriteableRegistry}, so this needs to be provided too.
*/
private static SearchSourceBuilder copyWriteable(SearchSourceBuilder original, NamedWriteableRegistry namedWriteableRegistry,
Writeable.Reader<SearchSourceBuilder> reader) throws IOException {
Writeable.Writer<SearchSourceBuilder> writer = (out, value) -> value.writeTo(out);
try (BytesStreamOutput output = new BytesStreamOutput()) {
output.setVersion(Version.CURRENT);
writer.write(output, original);
try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) {
in.setVersion(Version.CURRENT);
return reader.read(in);
}
}
}
static void validateSearchRequest(SearchRequest request) {
// Rollup does not support hits at the moment
if (request.source().size() != 0) {
@ -205,9 +251,9 @@ public class TransportRollupSearchAction extends TransportAction<SearchRequest,
}
}
static QueryBuilder rewriteQuery(QueryBuilder builder, List<RollupJobCaps> jobCaps) {
static QueryBuilder rewriteQuery(QueryBuilder builder, Set<RollupJobCaps> jobCaps) {
if (builder == null) {
return null;
return new MatchAllQueryBuilder();
}
if (builder.getWriteableName().equals(BoolQueryBuilder.NAME)) {
BoolQueryBuilder rewrittenBool = new BoolQueryBuilder();
@ -239,15 +285,23 @@ public class TransportRollupSearchAction extends TransportAction<SearchRequest,
return fieldCaps.getAggs().stream()
// For now, we only allow filtering on grouping fields
.filter(agg -> {
String type = (String)agg.get("agg");
String type = (String)agg.get(RollupField.AGG);
return type.equals(TermsAggregationBuilder.NAME)
|| type.equals(DateHistogramAggregationBuilder.NAME)
|| type.equals(HistogramAggregationBuilder.NAME);
})
// Rewrite the field name to our convention (e.g. "foo" -> "date_histogram.foo.value")
.map(agg -> RollupField.formatFieldName(fieldName, (String)agg.get("agg"), RollupField.VALUE))
// Rewrite the field name to our convention (e.g. "foo" -> "foo.date_histogram.timestamp")
.map(agg -> {
if (agg.get(RollupField.AGG).equals(DateHistogramAggregationBuilder.NAME)) {
return RollupField.formatFieldName(fieldName, (String)agg.get(RollupField.AGG), RollupField.TIMESTAMP);
} else {
return RollupField.formatFieldName(fieldName, (String)agg.get(RollupField.AGG), RollupField.VALUE);
}
})
.collect(Collectors.toList());
}).collect(ArrayList::new, List::addAll, List::addAll);
})
.distinct()
.collect(ArrayList::new, List::addAll, List::addAll);
if (rewrittenFieldName.isEmpty()) {
throw new IllegalArgumentException("Field [" + fieldName + "] in [" + builder.getWriteableName()
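
The stream above maps each grouping agg in the job caps onto the flattened field that the rollup documents actually contain. A standalone approximation of that mapping is shown below; the string concatenation stands in for `RollupField.formatFieldName` and is an assumption about its output.

[source,java]
----
class FieldRewriteSketch {
    // Queries on the raw field are pointed at the rollup document's field for
    // whichever grouping agg indexed it, e.g. "foo" -> "foo.date_histogram.timestamp"
    // or "price" -> "price.histogram.value".
    static String rewriteFieldName(String field, String aggType) {
        if ("date_histogram".equals(aggType)) {
            return field + "." + aggType + ".timestamp";
        }
        return field + "." + aggType + ".value";
    }
}
----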
@ -283,8 +337,7 @@ public class TransportRollupSearchAction extends TransportAction<SearchRequest,
}
}
static Triple<String[], String[], List<RollupJobCaps>> separateIndices(String[] indices,
ImmutableOpenMap<String, IndexMetaData> indexMetaData) {
static RollupSearchContext separateIndices(String[] indices, ImmutableOpenMap<String, IndexMetaData> indexMetaData) {
if (indices.length == 0) {
throw new IllegalArgumentException("Must specify at least one concrete index.");
@ -292,7 +345,7 @@ public class TransportRollupSearchAction extends TransportAction<SearchRequest,
List<String> rollup = new ArrayList<>();
List<String> normal = new ArrayList<>();
List<RollupJobCaps> jobCaps = new ArrayList<>();
Set<RollupJobCaps> jobCaps = new HashSet<>();
Arrays.stream(indices).forEach(i -> {
if (i.equals(MetaData.ALL)) {
throw new IllegalArgumentException("Searching _all via RollupSearch endpoint is not supported at this time.");
@ -305,8 +358,11 @@ public class TransportRollupSearchAction extends TransportAction<SearchRequest,
normal.add(i);
}
});
assert(normal.size() + rollup.size() > 0);
return new Triple<>(normal.toArray(new String[normal.size()]), rollup.toArray(new String[rollup.size()]), jobCaps);
assert normal.size() + rollup.size() > 0;
if (rollup.size() > 1) {
throw new IllegalArgumentException("RollupSearch currently only supports searching one rollup index at a time.");
}
return new RollupSearchContext(normal.toArray(new String[normal.size()]), rollup.toArray(new String[rollup.size()]), jobCaps);
}
class TransportHandler implements TransportRequestHandler<SearchRequest> {
@ -347,69 +403,37 @@ public class TransportRollupSearchAction extends TransportAction<SearchRequest,
}
}
/**
* Adds various filter conditions that apply to the entire rollup query, such
* as the rollup hash and rollup version
*/
private static List<QueryBuilder> mandatoryFilterConditions() {
List<QueryBuilder> conditions = new ArrayList<>(1);
conditions.add(new TermQueryBuilder(RollupField.ROLLUP_META + "." + RollupField.VERSION_FIELD, Rollup.ROLLUP_VERSION));
return conditions;
}
static class RollupSearchContext {
private final String[] liveIndices;
private final String[] rollupIndices;
private final Set<RollupJobCaps> jobCaps;
static class Triple<V1, V2, V3> {
public static <V1, V2, V3> Triple<V1, V2, V3> triple(V1 v1, V2 v2, V3 v3) {
return new Triple<>(v1, v2, v3);
RollupSearchContext(String[] liveIndices, String[] rollupIndices, Set<RollupJobCaps> jobCaps) {
this.liveIndices = Objects.requireNonNull(liveIndices);
this.rollupIndices = Objects.requireNonNull(rollupIndices);
this.jobCaps = Objects.requireNonNull(jobCaps);
}
private final V1 v1;
private final V2 v2;
private final V3 v3;
Triple(V1 v1, V2 v2, V3 v3) {
this.v1 = v1;
this.v2 = v2;
this.v3 = v3;
boolean hasLiveIndices() {
return liveIndices.length != 0;
}
public V1 v1() {
return v1;
boolean hasRollupIndices() {
return rollupIndices.length != 0;
}
public V2 v2() {
return v2;
String[] getLiveIndices() {
return liveIndices;
}
public V3 v3() {
return v3;
String[] getRollupIndices() {
return rollupIndices;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Triple<?,?,?> triple = (Triple) o;
if (v1 != null ? !v1.equals(triple.v1) : triple.v1 != null) return false;
if (v2 != null ? !v2.equals(triple.v2) : triple.v2 != null) return false;
if (v3 != null ? !v3.equals(triple.v3) : triple.v3 != null) return false;
return true;
Set<RollupJobCaps> getJobCaps() {
return jobCaps;
}
@Override
public int hashCode() {
int result = v1 != null ? v1.hashCode() : 0;
result = 31 * result + (v2 != null ? v2.hashCode() : 0) + (v3 != null ? v3.hashCode() : 0);
return result;
}
@Override
public String toString() {
return "Tuple [v1=" + v1 + ", v2=" + v2 + ", v3=" + v3 + "]";
}
}
}


@ -93,7 +93,7 @@ public class TransportStartRollupAction extends TransportTasksAction<RollupJobTa
throw new ResourceNotFoundException("Task for Rollup Job [" + request.getId() + "] not found");
}
assert(tasks.size() == 1);
assert tasks.size() == 1;
boolean allStarted = tasks.stream().allMatch(StartRollupJobAction.Response::isStarted);
return new StartRollupJobAction.Response(allStarted);


@ -91,7 +91,7 @@ public class TransportStopRollupAction extends TransportTasksAction<RollupJobTas
throw new ResourceNotFoundException("Task for Rollup Job [" + request.getId() + "] not found");
}
assert(tasks.size() == 1);
assert tasks.size() == 1;
boolean allStopped = tasks.stream().allMatch(StopRollupJobAction.Response::isStopped);
return new StopRollupJobAction.Response(allStopped);


@ -50,7 +50,7 @@ class IndexerUtils {
static List<IndexRequest> processBuckets(CompositeAggregation agg, String rollupIndex, RollupJobStats stats,
GroupConfig groupConfig, String jobId) {
logger.debug("Buckets: [" + agg.getBuckets().size() + "]");
logger.debug("Buckets: [" + agg.getBuckets().size() + "][" + jobId + "]");
return agg.getBuckets().stream().map(b ->{
stats.incrementNumDocuments(b.getDocCount());
@ -61,6 +61,8 @@ class IndexerUtils {
Map<String, Object> doc = new HashMap<>(keys.size() + metrics.size());
CRC32 docId = processKeys(keys, doc, b.getDocCount(), groupConfig);
byte[] vs = jobId.getBytes(StandardCharsets.UTF_8);
docId.update(vs, 0, vs.length);
processMetrics(metrics, doc);
Set<String> computed = new HashSet<>(keys.size() + metrics.size());
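
The two added lines above fold the job's ID into the CRC32 that becomes the rollup document ID, so two jobs that produce identical bucket keys no longer collide on the same summary document. A minimal standalone illustration (the method and variable names are hypothetical):

[source,java]
----
import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;

class RollupDocIdSketch {
    // Identical bucket keys hashed under different job IDs yield different doc IDs.
    static long docIdFor(String jobId, Object... bucketKeys) {
        CRC32 crc = new CRC32();
        for (Object key : bucketKeys) {
            byte[] kb = String.valueOf(key).getBytes(StandardCharsets.UTF_8);
            crc.update(kb, 0, kb.length);
        }
        byte[] jb = jobId.getBytes(StandardCharsets.UTF_8);
        crc.update(jb, 0, jb.length);
        return crc.getValue();
    }
}
----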


@ -0,0 +1,530 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.rollup;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers;
import org.elasticsearch.xpack.core.rollup.action.RollupJobCaps;
import org.elasticsearch.xpack.core.rollup.job.DateHistoGroupConfig;
import org.elasticsearch.xpack.core.rollup.job.GroupConfig;
import org.elasticsearch.xpack.core.rollup.job.HistoGroupConfig;
import org.elasticsearch.xpack.core.rollup.job.MetricConfig;
import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig;
import org.joda.time.DateTimeZone;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import static org.hamcrest.Matchers.equalTo;
public class RollupJobIdentifierUtilTests extends ESTestCase {
public void testOneMatch() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build());
job.setGroupConfig(group.build());
RollupJobCaps cap = new RollupJobCaps(job.build());
Set<RollupJobCaps> caps = singletonSet(cap);
DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo")
.dateHistogramInterval(job.getGroupConfig().getDateHisto().getInterval());
Set<RollupJobCaps> bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps);
assertThat(bestCaps.size(), equalTo(1));
}
public void testBiggerButCompatibleInterval() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build());
job.setGroupConfig(group.build());
RollupJobCaps cap = new RollupJobCaps(job.build());
Set<RollupJobCaps> caps = singletonSet(cap);
DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo")
.dateHistogramInterval(new DateHistogramInterval("1d"));
Set<RollupJobCaps> bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps);
assertThat(bestCaps.size(), equalTo(1));
}
public void testIncompatibleInterval() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1d")).build());
job.setGroupConfig(group.build());
RollupJobCaps cap = new RollupJobCaps(job.build());
Set<RollupJobCaps> caps = singletonSet(cap);
DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo")
.dateHistogramInterval(new DateHistogramInterval("1h"));
RuntimeException e = expectThrows(RuntimeException.class, () -> RollupJobIdentifierUtils.findBestJobs(builder, caps));
assertThat(e.getMessage(), equalTo("There is not a rollup job that has a [date_histogram] agg on field " +
"[foo] which also satisfies all requirements of query."));
}
public void testBadTimeZone() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1d"))
.setTimeZone(DateTimeZone.forID("EST")).build());
job.setGroupConfig(group.build());
RollupJobCaps cap = new RollupJobCaps(job.build());
Set<RollupJobCaps> caps = singletonSet(cap);
DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo")
.dateHistogramInterval(new DateHistogramInterval("1h"))
.timeZone(DateTimeZone.UTC);
RuntimeException e = expectThrows(RuntimeException.class, () -> RollupJobIdentifierUtils.findBestJobs(builder, caps));
assertThat(e.getMessage(), equalTo("There is not a rollup job that has a [date_histogram] agg on field " +
"[foo] which also satisfies all requirements of query."));
}
public void testMetricOnlyAgg() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build());
job.setGroupConfig(group.build());
job.setMetricsConfig(Collections.singletonList(new MetricConfig.Builder()
.setField("bar")
.setMetrics(Collections.singletonList("max"))
.build()));
RollupJobCaps cap = new RollupJobCaps(job.build());
Set<RollupJobCaps> caps = singletonSet(cap);
MaxAggregationBuilder max = new MaxAggregationBuilder("the_max").field("bar");
Set<RollupJobCaps> bestCaps = RollupJobIdentifierUtils.findBestJobs(max, caps);
assertThat(bestCaps.size(), equalTo(1));
}
public void testOneOfTwoMatchingCaps() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build());
job.setGroupConfig(group.build());
RollupJobCaps cap = new RollupJobCaps(job.build());
Set<RollupJobCaps> caps = singletonSet(cap);
DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo")
.dateHistogramInterval(new DateHistogramInterval("1h"))
.subAggregation(new MaxAggregationBuilder("the_max").field("bar"));
RuntimeException e = expectThrows(RuntimeException.class, () -> RollupJobIdentifierUtils.findBestJobs(builder, caps));
assertThat(e.getMessage(), equalTo("There is not a rollup job that has a [max] agg with name [the_max] which also satisfies " +
"all requirements of query."));
}
public void testTwoJobsSameRollupIndex() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build());
group.setTerms(null);
group.setHisto(null);
job.setGroupConfig(group.build());
RollupJobCaps cap = new RollupJobCaps(job.build());
Set<RollupJobCaps> caps = new HashSet<>(2);
caps.add(cap);
RollupJobConfig.Builder job2 = ConfigTestHelpers.getRollupJob("foo2");
GroupConfig.Builder group2 = ConfigTestHelpers.getGroupConfig();
group2.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build());
group2.setTerms(null);
group2.setHisto(null);
job2.setGroupConfig(group.build());
job2.setRollupIndex(job.getRollupIndex());
RollupJobCaps cap2 = new RollupJobCaps(job2.build());
caps.add(cap2);
DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo")
.dateHistogramInterval(new DateHistogramInterval("1h"));
Set<RollupJobCaps> bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps);
// Both jobs functionally identical, so only one is actually needed to be searched
assertThat(bestCaps.size(), equalTo(1));
}
public void testTwoJobsButBothPartialMatches() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build());
job.setGroupConfig(group.build());
job.setMetricsConfig(Collections.singletonList(new MetricConfig.Builder()
.setField("bar")
.setMetrics(Collections.singletonList("max"))
.build()));
RollupJobCaps cap = new RollupJobCaps(job.build());
Set<RollupJobCaps> caps = new HashSet<>(2);
caps.add(cap);
RollupJobConfig.Builder job2 = ConfigTestHelpers.getRollupJob("foo2");
GroupConfig.Builder group2 = ConfigTestHelpers.getGroupConfig();
group2.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build());
job2.setGroupConfig(group.build());
job.setMetricsConfig(Collections.singletonList(new MetricConfig.Builder()
.setField("bar")
.setMetrics(Collections.singletonList("min"))
.build()));
RollupJobCaps cap2 = new RollupJobCaps(job2.build());
caps.add(cap2);
DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo")
.dateHistogramInterval(new DateHistogramInterval("1h"))
.subAggregation(new MaxAggregationBuilder("the_max").field("bar")) // <-- comes from job1
.subAggregation(new MinAggregationBuilder("the_min").field("bar")); // <-- comes from job2
RuntimeException e = expectThrows(RuntimeException.class, () -> RollupJobIdentifierUtils.findBestJobs(builder, caps));
assertThat(e.getMessage(), equalTo("There is not a rollup job that has a [min] agg with name [the_min] which also " +
"satisfies all requirements of query."));
}
public void testComparableDifferentDateIntervals() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build())
.setHisto(null)
.setTerms(null);
job.setGroupConfig(group.build());
RollupJobCaps cap = new RollupJobCaps(job.build());
RollupJobConfig.Builder job2 = ConfigTestHelpers.getRollupJob("foo2").setRollupIndex(job.getRollupIndex());
GroupConfig.Builder group2 = ConfigTestHelpers.getGroupConfig();
group2.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1d")).build())
.setHisto(null)
.setTerms(null);
job2.setGroupConfig(group2.build());
RollupJobCaps cap2 = new RollupJobCaps(job2.build());
DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo")
.dateHistogramInterval(new DateHistogramInterval("1d"));
Set<RollupJobCaps> caps = new HashSet<>(2);
caps.add(cap);
caps.add(cap2);
Set<RollupJobCaps> bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps);
assertThat(bestCaps.size(), equalTo(1));
assertTrue(bestCaps.contains(cap2));
}
public void testComparableDifferentDateIntervalsOnlyOneWorks() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build())
.setHisto(null)
.setTerms(null);
job.setGroupConfig(group.build());
RollupJobCaps cap = new RollupJobCaps(job.build());
RollupJobConfig.Builder job2 = ConfigTestHelpers.getRollupJob("foo2").setRollupIndex(job.getRollupIndex());
GroupConfig.Builder group2 = ConfigTestHelpers.getGroupConfig();
group2.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1d")).build())
.setHisto(null)
.setTerms(null);
job2.setGroupConfig(group2.build());
RollupJobCaps cap2 = new RollupJobCaps(job2.build());
DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo")
.dateHistogramInterval(new DateHistogramInterval("1h"));
Set<RollupJobCaps> caps = new HashSet<>(2);
caps.add(cap);
caps.add(cap2);
Set<RollupJobCaps> bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps);
assertThat(bestCaps.size(), equalTo(1));
assertTrue(bestCaps.contains(cap));
}
public void testComparableNoHistoVsHisto() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build())
.setHisto(null)
.setTerms(null);
job.setGroupConfig(group.build());
RollupJobCaps cap = new RollupJobCaps(job.build());
RollupJobConfig.Builder job2 = ConfigTestHelpers.getRollupJob("foo2").setRollupIndex(job.getRollupIndex());
GroupConfig.Builder group2 = ConfigTestHelpers.getGroupConfig();
group2.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build())
.setHisto(ConfigTestHelpers.getHisto().setInterval(100).setFields(Collections.singletonList("bar")).build())
.setTerms(null);
job2.setGroupConfig(group2.build());
RollupJobCaps cap2 = new RollupJobCaps(job2.build());
DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo")
.dateHistogramInterval(new DateHistogramInterval("1h"))
.subAggregation(new HistogramAggregationBuilder("histo").field("bar").interval(100));
Set<RollupJobCaps> caps = new HashSet<>(2);
caps.add(cap);
caps.add(cap2);
Set<RollupJobCaps> bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps);
assertThat(bestCaps.size(), equalTo(1));
assertTrue(bestCaps.contains(cap2));
}
public void testComparableNoTermsVsTerms() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build())
.setHisto(null)
.setTerms(null);
job.setGroupConfig(group.build());
RollupJobCaps cap = new RollupJobCaps(job.build());
RollupJobConfig.Builder job2 = ConfigTestHelpers.getRollupJob("foo2").setRollupIndex(job.getRollupIndex());
GroupConfig.Builder group2 = ConfigTestHelpers.getGroupConfig();
group2.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build())
.setHisto(null)
.setTerms(ConfigTestHelpers.getTerms().setFields(Collections.singletonList("bar")).build());
job2.setGroupConfig(group2.build());
RollupJobCaps cap2 = new RollupJobCaps(job2.build());
DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo")
.dateHistogramInterval(new DateHistogramInterval("1h"))
.subAggregation(new TermsAggregationBuilder("histo", ValueType.STRING).field("bar"));
Set<RollupJobCaps> caps = new HashSet<>(2);
caps.add(cap);
caps.add(cap2);
Set<RollupJobCaps> bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps);
assertThat(bestCaps.size(), equalTo(1));
assertTrue(bestCaps.contains(cap2));
}
public void testHistoSameNameWrongTypeInCaps() {
HistogramAggregationBuilder histo = new HistogramAggregationBuilder("test_histo");
histo.field("foo")
.interval(1L)
.subAggregation(new MaxAggregationBuilder("the_max").field("max_field"))
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setDateHisto(new DateHistoGroupConfig.Builder()
.setInterval(new DateHistogramInterval("1d"))
.setField("foo") // <-- NOTE same name but wrong type
.setTimeZone(DateTimeZone.UTC)
.build())
.setHisto(new HistoGroupConfig.Builder()
.setFields(Collections.singletonList("baz")) // <-- NOTE right type but wrong name
.setInterval(1L)
.build())
.build())
.setMetricsConfig(Arrays.asList(new MetricConfig.Builder()
.setField("max_field")
.setMetrics(Collections.singletonList("max")).build(),
new MetricConfig.Builder()
.setField("avg_field")
.setMetrics(Collections.singletonList("avg")).build()))
.build();
Set<RollupJobCaps> caps = singletonSet(new RollupJobCaps(job));
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> RollupJobIdentifierUtils.findBestJobs(histo, caps));
assertThat(e.getMessage(), equalTo("There is not a rollup job that has a [histogram] " +
"agg on field [foo] which also satisfies all requirements of query."));
}
public void testMissingDateHisto() {
DateHistogramAggregationBuilder histo = new DateHistogramAggregationBuilder("test_histo");
histo.dateHistogramInterval(new DateHistogramInterval("1d"))
.field("other_field")
.subAggregation(new MaxAggregationBuilder("the_max").field("max_field"))
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setDateHisto(new DateHistoGroupConfig.Builder()
.setInterval(new DateHistogramInterval("1d"))
.setField("foo")
.setTimeZone(DateTimeZone.UTC)
.build())
.build())
.setMetricsConfig(Arrays.asList(new MetricConfig.Builder()
.setField("max_field")
.setMetrics(Collections.singletonList("max")).build(),
new MetricConfig.Builder()
.setField("avg_field")
.setMetrics(Collections.singletonList("avg")).build()))
.build();
Set<RollupJobCaps> caps = singletonSet(new RollupJobCaps(job));
Exception e = expectThrows(IllegalArgumentException.class, () -> RollupJobIdentifierUtils.findBestJobs(histo, caps));
assertThat(e.getMessage(), equalTo("There is not a rollup job that has a [date_histogram] agg on field " +
"[other_field] which also satisfies all requirements of query."));
}
public void testNoMatchingInterval() {
DateHistogramAggregationBuilder histo = new DateHistogramAggregationBuilder("test_histo");
histo.interval(1)
.field("foo")
.subAggregation(new MaxAggregationBuilder("the_max").field("max_field"))
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setDateHisto(new DateHistoGroupConfig.Builder()
.setInterval(new DateHistogramInterval("100d")) // <- interval in job is much higher than agg interval above
.setField("foo")
.setTimeZone(DateTimeZone.UTC)
.build())
.build())
.build();
Set<RollupJobCaps> caps = singletonSet(new RollupJobCaps(job));
Exception e = expectThrows(RuntimeException.class, () -> RollupJobIdentifierUtils.findBestJobs(histo, caps));
assertThat(e.getMessage(), equalTo("There is not a rollup job that has a [date_histogram] agg on field [foo] " +
"which also satisfies all requirements of query."));
}
public void testDateHistoMissingFieldInCaps() {
DateHistogramAggregationBuilder histo = new DateHistogramAggregationBuilder("test_histo");
histo.dateHistogramInterval(new DateHistogramInterval("1d"))
.field("foo")
.subAggregation(new MaxAggregationBuilder("the_max").field("max_field"))
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setDateHisto(new DateHistoGroupConfig.Builder()
.setInterval(new DateHistogramInterval("1d"))
.setField("bar") // <-- NOTE different field from the one in the query
.setTimeZone(DateTimeZone.UTC)
.build())
.build())
.setMetricsConfig(Arrays.asList(new MetricConfig.Builder()
.setField("max_field")
.setMetrics(Collections.singletonList("max")).build(),
new MetricConfig.Builder()
.setField("avg_field")
.setMetrics(Collections.singletonList("avg")).build()))
.build();
Set<RollupJobCaps> caps = singletonSet(new RollupJobCaps(job));
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> RollupJobIdentifierUtils.findBestJobs(histo, caps));
assertThat(e.getMessage(), equalTo("There is not a rollup job that has a [date_histogram] agg on field [foo] which also " +
"satisfies all requirements of query."));
}
public void testHistoMissingFieldInCaps() {
HistogramAggregationBuilder histo = new HistogramAggregationBuilder("test_histo");
histo.interval(1)
.field("foo")
.subAggregation(new MaxAggregationBuilder("the_max").field("max_field"))
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setDateHisto(new DateHistoGroupConfig.Builder()
.setInterval(new DateHistogramInterval("1d"))
.setField("bar")
.setTimeZone(DateTimeZone.UTC)
.build())
.setHisto(new HistoGroupConfig.Builder()
.setFields(Collections.singletonList("baz")) // <-- NOTE note different field from one used in query
.setInterval(1L)
.build())
.build())
.setMetricsConfig(Arrays.asList(new MetricConfig.Builder()
.setField("max_field")
.setMetrics(Collections.singletonList("max")).build(),
new MetricConfig.Builder()
.setField("avg_field")
.setMetrics(Collections.singletonList("avg")).build()))
.build();
Set<RollupJobCaps> caps = singletonSet(new RollupJobCaps(job));
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> RollupJobIdentifierUtils.findBestJobs(histo, caps));
assertThat(e.getMessage(), equalTo("There is not a rollup job that has a [histogram] agg on field [foo] which also " +
"satisfies all requirements of query."));
}
public void testNoMatchingHistoInterval() {
HistogramAggregationBuilder histo = new HistogramAggregationBuilder("test_histo");
histo.interval(1)
.field("bar")
.subAggregation(new MaxAggregationBuilder("the_max").field("max_field"))
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setDateHisto(new DateHistoGroupConfig.Builder()
.setInterval(new DateHistogramInterval("1d"))
.setField("foo")
.setTimeZone(DateTimeZone.UTC)
.build())
.setHisto(new HistoGroupConfig.Builder()
.setFields(Collections.singletonList("bar"))
.setInterval(100L) // <--- interval in job is much higher than agg interval above
.build())
.build())
.build();
Set<RollupJobCaps> caps = singletonSet(new RollupJobCaps(job));
Exception e = expectThrows(RuntimeException.class,
() -> RollupJobIdentifierUtils.findBestJobs(histo, caps));
assertThat(e.getMessage(), equalTo("There is not a rollup job that has a [histogram] agg on field " +
"[bar] which also satisfies all requirements of query."));
}
public void testMissingMetric() {
int i = ESTestCase.randomIntBetween(0, 3);
Set<RollupJobCaps> caps = singletonSet(new RollupJobCaps(ConfigTestHelpers
.getRollupJob("foo").setMetricsConfig(Collections.singletonList(new MetricConfig.Builder()
.setField("foo")
.setMetrics(Arrays.asList("avg", "max", "min", "sum")).build()))
.build()));
String aggType;
Exception e;
if (i == 0) {
e = expectThrows(IllegalArgumentException.class,
() -> RollupJobIdentifierUtils.findBestJobs(new MaxAggregationBuilder("test_metric").field("other_field"), caps));
aggType = "max";
} else if (i == 1) {
e = expectThrows(IllegalArgumentException.class,
() -> RollupJobIdentifierUtils.findBestJobs(new MinAggregationBuilder("test_metric").field("other_field"), caps));
aggType = "min";
} else if (i == 2) {
e = expectThrows(IllegalArgumentException.class,
() -> RollupJobIdentifierUtils.findBestJobs(new SumAggregationBuilder("test_metric").field("other_field"), caps));
aggType = "sum";
} else {
e = expectThrows(IllegalArgumentException.class,
() -> RollupJobIdentifierUtils.findBestJobs(new AvgAggregationBuilder("test_metric").field("other_field"), caps));
aggType = "avg";
}
assertThat(e.getMessage(), equalTo("There is not a rollup job that has a [" + aggType + "] agg with name " +
"[test_metric] which also satisfies all requirements of query."));
}
private Set<RollupJobCaps> singletonSet(RollupJobCaps cap) {
Set<RollupJobCaps> caps = new HashSet<>();
caps.add(cap);
return caps;
}
}
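
Taken together with the transport action changes earlier in this commit, the tests above exercise a selection flow that looks roughly like the following sketch: each top-level aggregation branch prunes the caps independently, and the union of surviving jobs drives one msearch entry per job ID. Names mirror the production code, but the method itself is illustrative.

[source,java]
----
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.xpack.core.rollup.action.RollupJobCaps;
import org.elasticsearch.xpack.rollup.RollupJobIdentifierUtils;

class JobSelectionSketch {
    // Union the "best" caps chosen per branch, then collect the job IDs that
    // will each get their own filtered search in the msearch.
    static List<String> selectJobIds(List<AggregationBuilder> topLevelAggs, Set<RollupJobCaps> allCaps) {
        Set<RollupJobCaps> best = new HashSet<>();
        for (AggregationBuilder agg : topLevelAggs) {
            best.addAll(RollupJobIdentifierUtils.findBestJobs(agg, allCaps));
        }
        return best.stream().map(RollupJobCaps::getJobID).collect(Collectors.toList());
    }
}
----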


@ -22,27 +22,25 @@ import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilde
import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.stats.StatsAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers;
import org.elasticsearch.xpack.core.rollup.action.RollupJobCaps;
import org.elasticsearch.xpack.core.rollup.job.DateHistoGroupConfig;
import org.elasticsearch.xpack.core.rollup.job.HistoGroupConfig;
import org.elasticsearch.xpack.core.rollup.job.MetricConfig;
import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig;
import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig;
import org.hamcrest.Matchers;
import org.joda.time.DateTimeZone;
import org.junit.Before;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
@ -66,7 +64,6 @@ public class RollupRequestTranslationTests extends ESTestCase {
}
public void testBasicDateHisto() {
// TODO grab some of the logic from DateHistogramTests to build more robust tests
DateHistogramAggregationBuilder histo = new DateHistogramAggregationBuilder("test_histo");
histo.dateHistogramInterval(new DateHistogramInterval("1d"))
.field("foo")
@ -75,24 +72,7 @@ public class RollupRequestTranslationTests extends ESTestCase {
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
List<QueryBuilder> filterConditions = new ArrayList<>();
RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setDateHisto(new DateHistoGroupConfig.Builder()
.setInterval(new DateHistogramInterval("1d"))
.setField("foo")
.setTimeZone(DateTimeZone.UTC)
.build())
.build())
.setMetricsConfig(Arrays.asList(new MetricConfig.Builder()
.setField("max_field")
.setMetrics(Collections.singletonList("max")).build(),
new MetricConfig.Builder()
.setField("avg_field")
.setMetrics(Collections.singletonList("avg")).build()))
.build();
List<RollupJobCaps> caps = Collections.singletonList(new RollupJobCaps(job));
List<AggregationBuilder> translated = translateAggregation(histo, filterConditions, namedWriteableRegistry, caps);
List<AggregationBuilder> translated = translateAggregation(histo, filterConditions, namedWriteableRegistry);
assertThat(translated.size(), equalTo(1));
assertThat(translated.get(0), Matchers.instanceOf(DateHistogramAggregationBuilder.class));
DateHistogramAggregationBuilder translatedHisto = (DateHistogramAggregationBuilder)translated.get(0);
@ -107,8 +87,8 @@ public class RollupRequestTranslationTests extends ESTestCase {
assertThat(subAggs.get("the_max"), Matchers.instanceOf(MaxAggregationBuilder.class));
assertThat(((MaxAggregationBuilder)subAggs.get("the_max")).field(), equalTo("max_field.max.value"));
assertThat(subAggs.get("the_avg"), Matchers.instanceOf(SumAggregationBuilder.class));
SumAggregationBuilder avg = (SumAggregationBuilder)subAggs.get("the_avg");
assertThat(subAggs.get("the_avg.value"), Matchers.instanceOf(SumAggregationBuilder.class));
SumAggregationBuilder avg = (SumAggregationBuilder)subAggs.get("the_avg.value");
assertThat(avg.field(), equalTo("avg_field.avg.value"));
assertThat(subAggs.get("the_avg._count"), Matchers.instanceOf(SumAggregationBuilder.class));
@ -119,22 +99,16 @@ public class RollupRequestTranslationTests extends ESTestCase {
assertThat(((SumAggregationBuilder)subAggs.get("test_histo._count")).field(),
equalTo("foo.date_histogram._count"));
assertThat(filterConditions.size(), equalTo(4));
assertThat(filterConditions.size(), equalTo(2));
for (QueryBuilder q : filterConditions) {
if (q instanceof TermQueryBuilder) {
switch (((TermQueryBuilder) q).fieldName()) {
case "foo.date_histogram.interval":
assertThat(((TermQueryBuilder) q).value().toString(), equalTo(new DateHistogramInterval("1d").toString()));
break;
case "foo.date_histogram.time_zone":
assertThat(((TermQueryBuilder) q).value(), equalTo("UTC"));
break;
case "_rollup.computed":
assertThat(((TermQueryBuilder) q).value(), equalTo("foo.date_histogram"));
break;
case "_rollup.id":
assertThat(((TermQueryBuilder) q).value(), equalTo("foo"));
break;
default:
fail("Unexpected Term Query in filter conditions: [" + ((TermQueryBuilder) q).fieldName() + "]");
break;
@ -155,24 +129,19 @@ public class RollupRequestTranslationTests extends ESTestCase {
String fieldName = null;
int numAggs = 1;
List<RollupJobCaps> caps = Collections.singletonList(new RollupJobCaps(ConfigTestHelpers
.getRollupJob("foo").setMetricsConfig(Collections.singletonList(new MetricConfig.Builder()
.setField("foo")
.setMetrics(Arrays.asList("avg", "max", "min", "sum")).build()))
.build()));
if (i == 0) {
translated = translateAggregation(new MaxAggregationBuilder("test_metric")
.field("foo"), filterConditions, namedWriteableRegistry, caps);
.field("foo"), filterConditions, namedWriteableRegistry);
clazz = MaxAggregationBuilder.class;
fieldName = "foo.max.value";
} else if (i == 1) {
translated = translateAggregation(new MinAggregationBuilder("test_metric")
.field("foo"), filterConditions, namedWriteableRegistry, caps);
.field("foo"), filterConditions, namedWriteableRegistry);
clazz = MinAggregationBuilder.class;
fieldName = "foo.min.value";
} else if (i == 2) {
translated = translateAggregation(new SumAggregationBuilder("test_metric")
.field("foo"), filterConditions, namedWriteableRegistry, caps);
.field("foo"), filterConditions, namedWriteableRegistry);
clazz = SumAggregationBuilder.class;
fieldName = "foo.sum.value";
}
@ -185,72 +154,21 @@ public class RollupRequestTranslationTests extends ESTestCase {
assertThat(filterConditions.size(), equalTo(0));
}
public void testMissingMetric() {
int i = ESTestCase.randomIntBetween(0, 3);
List<QueryBuilder> filterConditions = new ArrayList<>();
List<RollupJobCaps> caps = Collections.singletonList(new RollupJobCaps(ConfigTestHelpers
public void testUnsupportedMetric() {
Set<RollupJobCaps> caps = singletonSet(new RollupJobCaps(ConfigTestHelpers
.getRollupJob("foo").setMetricsConfig(Collections.singletonList(new MetricConfig.Builder()
.setField("foo")
.setMetrics(Arrays.asList("avg", "max", "min", "sum")).build()))
.build()));
String aggType;
Exception e;
if (i == 0) {
e = expectThrows(IllegalArgumentException.class, () -> translateAggregation(new MaxAggregationBuilder("test_metric")
.field("other_field"), filterConditions, namedWriteableRegistry, caps));
aggType = "max";
} else if (i == 1) {
e = expectThrows(IllegalArgumentException.class, () -> translateAggregation(new MinAggregationBuilder("test_metric")
.field("other_field"), filterConditions, namedWriteableRegistry, caps));
aggType = "min";
} else if (i == 2) {
e = expectThrows(IllegalArgumentException.class, () -> translateAggregation(new SumAggregationBuilder("test_metric")
.field("other_field"), filterConditions, namedWriteableRegistry, caps));
aggType = "sum";
} else {
e = expectThrows(IllegalArgumentException.class, () -> translateAggregation(new AvgAggregationBuilder("test_metric")
.field("other_field"), filterConditions, namedWriteableRegistry, caps));
aggType = "avg";
}
assertThat(e.getMessage(), equalTo("There is not a [" + aggType + "] agg with name " +
"[other_field] configured in selected rollup indices, cannot translate aggregation."));
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
() -> translateAggregation(new StatsAggregationBuilder("test_metric")
.field("foo"), Collections.emptyList(), namedWriteableRegistry));
assertThat(e.getMessage(), equalTo("Unable to translate aggregation tree into Rollup. Aggregation [test_metric] is of type " +
"[StatsAggregationBuilder] which is currently unsupported."));
}
public void testMissingDateHisto() {
DateHistogramAggregationBuilder histo = new DateHistogramAggregationBuilder("test_histo");
histo.dateHistogramInterval(new DateHistogramInterval("1d"))
.field("other_field")
.subAggregation(new MaxAggregationBuilder("the_max").field("max_field"))
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
List<QueryBuilder> filterConditions = new ArrayList<>();
RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setDateHisto(new DateHistoGroupConfig.Builder()
.setInterval(new DateHistogramInterval("1d"))
.setField("foo")
.setTimeZone(DateTimeZone.UTC)
.build())
.build())
.setMetricsConfig(Arrays.asList(new MetricConfig.Builder()
.setField("max_field")
.setMetrics(Collections.singletonList("max")).build(),
new MetricConfig.Builder()
.setField("avg_field")
.setMetrics(Collections.singletonList("avg")).build()))
.build();
List<RollupJobCaps> caps = Collections.singletonList(new RollupJobCaps(job));
Exception e = expectThrows(IllegalArgumentException.class,
() -> translateAggregation(histo, filterConditions, namedWriteableRegistry, caps));
assertThat(e.getMessage(), equalTo("There is not a [date_histogram] agg with name " +
"[other_field] configured in selected rollup indices, cannot translate aggregation."));
}
public void testSelectLowerGranularityDateInterval() {
public void testDateHistoIntervalWithMinMax() {
DateHistogramAggregationBuilder histo = new DateHistogramAggregationBuilder("test_histo");
histo.dateHistogramInterval(new DateHistogramInterval("1d"))
.field("foo")
@ -258,48 +176,7 @@ public class RollupRequestTranslationTests extends ESTestCase {
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
List<QueryBuilder> filterConditions = new ArrayList<>();
RollupJobConfig job1 = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setDateHisto(new DateHistoGroupConfig.Builder()
.setInterval(new DateHistogramInterval("1h"))
.setField("foo")
.setTimeZone(DateTimeZone.UTC)
.build())
.build())
.setMetricsConfig(Arrays.asList(new MetricConfig.Builder()
.setField("max_field")
.setMetrics(Collections.singletonList("max"))
.build(),
new MetricConfig.Builder()
.setField("avg_field")
.setMetrics(Collections.singletonList("avg"))
.build())
)
.build();
RollupJobConfig job2 = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setDateHisto(new DateHistoGroupConfig.Builder()
.setInterval(new DateHistogramInterval("1d"))
.setField("foo")
.setTimeZone(DateTimeZone.UTC)
.build())
.build())
.setMetricsConfig(Arrays.asList(new MetricConfig.Builder()
.setField("max_field")
.setMetrics(Collections.singletonList("max"))
.build(),
new MetricConfig.Builder()
.setField("avg_field")
.setMetrics(Collections.singletonList("avg"))
.build())
)
.build();
List<RollupJobCaps> caps = new ArrayList<>(2);
caps.add(new RollupJobCaps(job1));
caps.add(new RollupJobCaps(job2));
List<AggregationBuilder> translated = translateAggregation(histo, filterConditions, namedWriteableRegistry, caps);
List<AggregationBuilder> translated = translateAggregation(histo, filterConditions, namedWriteableRegistry);
assertThat(translated.size(), equalTo(1));
assertThat(translated.get(0), instanceOf(DateHistogramAggregationBuilder.class));
DateHistogramAggregationBuilder translatedHisto = (DateHistogramAggregationBuilder)translated.get(0);
@ -314,8 +191,8 @@ public class RollupRequestTranslationTests extends ESTestCase {
assertThat(subAggs.get("the_max"), instanceOf(MaxAggregationBuilder.class));
assertThat(((MaxAggregationBuilder)subAggs.get("the_max")).field(), equalTo("max_field.max.value"));
assertThat(subAggs.get("the_avg"), instanceOf(SumAggregationBuilder.class));
SumAggregationBuilder avg = (SumAggregationBuilder)subAggs.get("the_avg");
assertThat(subAggs.get("the_avg.value"), instanceOf(SumAggregationBuilder.class));
SumAggregationBuilder avg = (SumAggregationBuilder)subAggs.get("the_avg.value");
assertThat(avg.field(), equalTo("avg_field.avg.value"));
assertThat(subAggs.get("the_avg._count"), instanceOf(SumAggregationBuilder.class));
@ -326,18 +203,14 @@ public class RollupRequestTranslationTests extends ESTestCase {
assertThat(((SumAggregationBuilder)subAggs.get("test_histo._count")).field(),
equalTo("foo.date_histogram._count"));
assertThat(filterConditions.size(), equalTo(4));
assertThat(filterConditions.size(), equalTo(2));
for (QueryBuilder q : filterConditions) {
if (q instanceof TermQueryBuilder) {
if (((TermQueryBuilder) q).fieldName().equals("foo.date_histogram.interval")) {
assertThat(((TermQueryBuilder) q).value().toString(), equalTo("1h")); // <---- should be instead of 1d
} else if (((TermQueryBuilder) q).fieldName().equals("foo.date_histogram.time_zone")) {
if (((TermQueryBuilder) q).fieldName().equals("foo.date_histogram.time_zone")) {
assertThat(((TermQueryBuilder) q).value(), equalTo("UTC"));
} else if (((TermQueryBuilder) q).fieldName().equals("_rollup.computed")) {
assertThat(((TermQueryBuilder) q).value(), equalTo("foo.date_histogram"));
} else if (((TermQueryBuilder) q).fieldName().equals("_rollup.id")) {
assertThat(((TermQueryBuilder) q).value(), equalTo("foo"));
} else {
fail("Unexpected Term Query in filter conditions: [" + ((TermQueryBuilder) q).fieldName() + "]");
}
@ -347,61 +220,20 @@ public class RollupRequestTranslationTests extends ESTestCase {
}
}
public void testSelectLowerGranularityInteravl() {
public void testDateHistoLongIntervalWithMinMax() {
DateHistogramAggregationBuilder histo = new DateHistogramAggregationBuilder("test_histo");
histo.interval(3600000)
histo.interval(86400000)
.field("foo")
.subAggregation(new MaxAggregationBuilder("the_max").field("max_field"))
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
List<QueryBuilder> filterConditions = new ArrayList<>();
RollupJobConfig job1 = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setDateHisto(new DateHistoGroupConfig.Builder()
.setInterval(new DateHistogramInterval("1h"))
.setField("foo")
.setTimeZone(DateTimeZone.UTC)
.build())
.build())
.setMetricsConfig(Arrays.asList(new MetricConfig.Builder()
.setField("max_field")
.setMetrics(Collections.singletonList("max"))
.build(),
new MetricConfig.Builder()
.setField("avg_field")
.setMetrics(Collections.singletonList("avg"))
.build())
)
.build();
RollupJobConfig job2 = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setDateHisto(new DateHistoGroupConfig.Builder()
.setInterval(new DateHistogramInterval("1d"))
.setField("foo")
.setTimeZone(DateTimeZone.UTC)
.build())
.build())
.setMetricsConfig(Arrays.asList(new MetricConfig.Builder()
.setField("max_field")
.setMetrics(Collections.singletonList("max"))
.build(),
new MetricConfig.Builder()
.setField("avg_field")
.setMetrics(Collections.singletonList("avg"))
.build())
)
.build();
List<RollupJobCaps> caps = new ArrayList<>(2);
caps.add(new RollupJobCaps(job1));
caps.add(new RollupJobCaps(job2));
List<AggregationBuilder> translated = translateAggregation(histo, filterConditions, namedWriteableRegistry, caps);
List<AggregationBuilder> translated = translateAggregation(histo, filterConditions, namedWriteableRegistry);
assertThat(translated.size(), equalTo(1));
assertThat(translated.get(0), instanceOf(DateHistogramAggregationBuilder.class));
DateHistogramAggregationBuilder translatedHisto = (DateHistogramAggregationBuilder)translated.get(0);
assertThat(translatedHisto.interval(), equalTo(3600000L));
assertThat(translatedHisto.interval(), equalTo(86400000L));
assertThat(translatedHisto.field(), equalTo("foo.date_histogram.timestamp"));
assertThat(translatedHisto.getSubAggregations().size(), equalTo(4));
@ -411,8 +243,8 @@ public class RollupRequestTranslationTests extends ESTestCase {
assertThat(subAggs.get("the_max"), instanceOf(MaxAggregationBuilder.class));
assertThat(((MaxAggregationBuilder)subAggs.get("the_max")).field(), equalTo("max_field.max.value"));
assertThat(subAggs.get("the_avg"), instanceOf(SumAggregationBuilder.class));
SumAggregationBuilder avg = (SumAggregationBuilder)subAggs.get("the_avg");
assertThat(subAggs.get("the_avg.value"), instanceOf(SumAggregationBuilder.class));
SumAggregationBuilder avg = (SumAggregationBuilder)subAggs.get("the_avg.value");
assertThat(avg.field(), equalTo("avg_field.avg.value"));
assertThat(subAggs.get("the_avg._count"), instanceOf(SumAggregationBuilder.class));
@ -423,18 +255,14 @@ public class RollupRequestTranslationTests extends ESTestCase {
assertThat(((SumAggregationBuilder)subAggs.get("test_histo._count")).field(),
equalTo("foo.date_histogram._count"));
assertThat(filterConditions.size(), equalTo(4));
assertThat(filterConditions.size(), equalTo(2));
for (QueryBuilder q : filterConditions) {
if (q instanceof TermQueryBuilder) {
if (((TermQueryBuilder) q).fieldName().equals("foo.date_histogram.interval")) {
assertThat(((TermQueryBuilder) q).value().toString(), equalTo("1h")); // <---- should be instead of 1d
} else if (((TermQueryBuilder) q).fieldName().equals("foo.date_histogram.time_zone")) {
if (((TermQueryBuilder) q).fieldName().equals("foo.date_histogram.time_zone")) {
assertThat(((TermQueryBuilder) q).value(), equalTo("UTC"));
} else if (((TermQueryBuilder) q).fieldName().equals("_rollup.computed")) {
assertThat(((TermQueryBuilder) q).value(), equalTo("foo.date_histogram"));
} else if (((TermQueryBuilder) q).fieldName().equals("_rollup.id")) {
assertThat(((TermQueryBuilder) q).value(), equalTo("foo"));
} else {
fail("Unexpected Term Query in filter conditions: [" + ((TermQueryBuilder) q).fieldName() + "]");
}
@ -444,70 +272,17 @@ public class RollupRequestTranslationTests extends ESTestCase {
}
}
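// The next two tests cover interval mismatches: the requested agg (a 1d date histogram, and a plain
// histogram with interval 1) is finer than anything the configured job provides (a 100d date
// histogram), so translation is expected to throw rather than silently pick an incompatible job.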
public void testNoMatchingDateInterval() {
DateHistogramAggregationBuilder histo = new DateHistogramAggregationBuilder("test_histo");
histo.dateHistogramInterval(new DateHistogramInterval("1d"))
.field("foo")
.subAggregation(new MaxAggregationBuilder("the_max").field("max_field"))
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
List<QueryBuilder> filterConditions = new ArrayList<>();
RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setDateHisto(new DateHistoGroupConfig.Builder()
.setInterval(new DateHistogramInterval("100d")) // <- interval in job is much higher than agg interval above
.setField("foo")
.setTimeZone(DateTimeZone.UTC)
.build())
.build())
.build();
List<RollupJobCaps> caps = Collections.singletonList(new RollupJobCaps(job));
Exception e = expectThrows(RuntimeException.class,
() -> translateAggregation(histo, filterConditions, namedWriteableRegistry, caps));
assertThat(e.getMessage(), equalTo("Could not find a rolled date_histogram configuration that satisfies the interval [1d]"));
}
public void testNoMatchingInterval() {
DateHistogramAggregationBuilder histo = new DateHistogramAggregationBuilder("test_histo");
histo.interval(1)
.field("foo")
.subAggregation(new MaxAggregationBuilder("the_max").field("max_field"))
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
List<QueryBuilder> filterConditions = new ArrayList<>();
RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setDateHisto(new DateHistoGroupConfig.Builder()
.setInterval(new DateHistogramInterval("100d")) // <- interval in job is much higher than agg interval above
.setField("foo")
.setTimeZone(DateTimeZone.UTC)
.build())
.build())
.build();
List<RollupJobCaps> caps = Collections.singletonList(new RollupJobCaps(job));
Exception e = expectThrows(RuntimeException.class,
() -> translateAggregation(histo, filterConditions, namedWriteableRegistry, caps));
assertThat(e.getMessage(), equalTo("Could not find a rolled date_histogram configuration that satisfies the interval [1]"));
}
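// testAvgMetric checks that a single avg request expands into two sum aggregations (one for the
// rolled-up value, one for the rolled-up _count); in the updated assertions the value sum is looked
// up under "test_metric.value" instead of reusing the plain "test_metric" name.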
public void testAvgMetric() {
List<QueryBuilder> filterConditions = new ArrayList<>();
List<RollupJobCaps> caps = Collections.singletonList(new RollupJobCaps(ConfigTestHelpers
.getRollupJob("foo").setMetricsConfig(Collections.singletonList(new MetricConfig.Builder()
.setField("foo")
.setMetrics(Collections.singletonList("avg")).build()))
.build()));
List<AggregationBuilder> translated = translateAggregation(new AvgAggregationBuilder("test_metric")
.field("foo"), filterConditions, namedWriteableRegistry, caps);
.field("foo"), filterConditions, namedWriteableRegistry);
assertThat(translated.size(), equalTo(2));
Map<String, AggregationBuilder> metrics = translated.stream()
.collect(Collectors.toMap(AggregationBuilder::getName, Function.identity()));
assertThat(metrics.get("test_metric"), Matchers.instanceOf(SumAggregationBuilder.class));
assertThat(((SumAggregationBuilder)metrics.get("test_metric")).field(),
assertThat(metrics.get("test_metric.value"), Matchers.instanceOf(SumAggregationBuilder.class));
assertThat(((SumAggregationBuilder)metrics.get("test_metric.value")).field(),
equalTo("foo.avg.value"));
assertThat(metrics.get("test_metric._count"), Matchers.instanceOf(SumAggregationBuilder.class));
@ -525,22 +300,7 @@ public class RollupRequestTranslationTests extends ESTestCase {
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
List<QueryBuilder> filterConditions = new ArrayList<>();
RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setTerms(new TermsGroupConfig.Builder()
.setFields(Collections.singletonList("foo"))
.build())
.build())
.setMetricsConfig(Arrays.asList(new MetricConfig.Builder()
.setField("max_field")
.setMetrics(Collections.singletonList("max")).build(),
new MetricConfig.Builder()
.setField("avg_field")
.setMetrics(Collections.singletonList("avg")).build()))
.build();
List<RollupJobCaps> caps = Collections.singletonList(new RollupJobCaps(job));
List<AggregationBuilder> translated = translateAggregation(terms, filterConditions, namedWriteableRegistry, caps);
List<AggregationBuilder> translated = translateAggregation(terms, filterConditions, namedWriteableRegistry);
assertThat(translated.size(), equalTo(1));
assertThat(translated.get(0), Matchers.instanceOf(TermsAggregationBuilder.class));
TermsAggregationBuilder translatedHisto = (TermsAggregationBuilder)translated.get(0);
@ -554,8 +314,8 @@ public class RollupRequestTranslationTests extends ESTestCase {
assertThat(subAggs.get("the_max"), Matchers.instanceOf(MaxAggregationBuilder.class));
assertThat(((MaxAggregationBuilder)subAggs.get("the_max")).field(), equalTo("max_field.max.value"));
assertThat(subAggs.get("the_avg"), Matchers.instanceOf(SumAggregationBuilder.class));
SumAggregationBuilder avg = (SumAggregationBuilder)subAggs.get("the_avg");
assertThat(subAggs.get("the_avg.value"), Matchers.instanceOf(SumAggregationBuilder.class));
SumAggregationBuilder avg = (SumAggregationBuilder)subAggs.get("the_avg.value");
assertThat(avg.field(), equalTo("avg_field.avg.value"));
assertThat(subAggs.get("the_avg._count"), Matchers.instanceOf(SumAggregationBuilder.class));
@ -583,23 +343,7 @@ public class RollupRequestTranslationTests extends ESTestCase {
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
List<QueryBuilder> filterConditions = new ArrayList<>();
RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setHisto(new HistoGroupConfig.Builder()
.setFields(Collections.singletonList("foo"))
.setInterval(1L)
.build())
.build())
.setMetricsConfig(Arrays.asList(new MetricConfig.Builder()
.setField("max_field")
.setMetrics(Collections.singletonList("max")).build(),
new MetricConfig.Builder()
.setField("avg_field")
.setMetrics(Collections.singletonList("avg")).build()))
.build();
List<RollupJobCaps> caps = Collections.singletonList(new RollupJobCaps(job));
List<AggregationBuilder> translated = translateAggregation(histo, filterConditions, namedWriteableRegistry, caps);
List<AggregationBuilder> translated = translateAggregation(histo, filterConditions, namedWriteableRegistry);
assertThat(translated.size(), equalTo(1));
assertThat(translated.get(0), Matchers.instanceOf(HistogramAggregationBuilder.class));
HistogramAggregationBuilder translatedHisto = (HistogramAggregationBuilder)translated.get(0);
@ -613,8 +357,8 @@ public class RollupRequestTranslationTests extends ESTestCase {
assertThat(subAggs.get("the_max"), Matchers.instanceOf(MaxAggregationBuilder.class));
assertThat(((MaxAggregationBuilder)subAggs.get("the_max")).field(), equalTo("max_field.max.value"));
assertThat(subAggs.get("the_avg"), Matchers.instanceOf(SumAggregationBuilder.class));
SumAggregationBuilder avg = (SumAggregationBuilder)subAggs.get("the_avg");
assertThat(subAggs.get("the_avg.value"), Matchers.instanceOf(SumAggregationBuilder.class));
SumAggregationBuilder avg = (SumAggregationBuilder)subAggs.get("the_avg.value");
assertThat(avg.field(), equalTo("avg_field.avg.value"));
assertThat(subAggs.get("the_avg._count"), Matchers.instanceOf(SumAggregationBuilder.class));
@ -625,19 +369,13 @@ public class RollupRequestTranslationTests extends ESTestCase {
assertThat(((SumAggregationBuilder)subAggs.get("test_histo._count")).field(),
equalTo("foo.histogram._count"));
assertThat(filterConditions.size(), equalTo(3));
assertThat(filterConditions.size(), equalTo(1));
for (QueryBuilder q : filterConditions) {
if (q instanceof TermQueryBuilder) {
switch (((TermQueryBuilder) q).fieldName()) {
case "foo.histogram.interval":
assertThat(((TermQueryBuilder) q).value().toString(), equalTo("1"));
break;
case "_rollup.computed":
assertThat(((TermQueryBuilder) q).value(), equalTo("foo.histogram"));
break;
case "_rollup.id":
assertThat(((TermQueryBuilder) q).value(), equalTo("foo"));
break;
default:
fail("Unexpected Term Query in filter conditions: [" + ((TermQueryBuilder) q).fieldName() + "]");
break;
@ -655,252 +393,15 @@ public class RollupRequestTranslationTests extends ESTestCase {
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
List<QueryBuilder> filterConditions = new ArrayList<>();
RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setDateHisto(new DateHistoGroupConfig.Builder()
.setInterval(new DateHistogramInterval("100d"))
.setField("foo")
.setTimeZone(DateTimeZone.UTC)
.build())
.build())
.build();
List<RollupJobCaps> caps = Collections.singletonList(new RollupJobCaps(job));
Exception e = expectThrows(RuntimeException.class,
() -> translateAggregation(geo, filterConditions, namedWriteableRegistry, caps));
() -> translateAggregation(geo, filterConditions, namedWriteableRegistry));
assertThat(e.getMessage(), equalTo("Unable to translate aggregation tree into Rollup. Aggregation [test_geo] is of type " +
"[GeoDistanceAggregationBuilder] which is currently unsupported."));
}
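// The *MissingFieldInCaps and *WrongTypeInCaps tests below feed job caps whose grouping fields do
// not line up with the field used in the query ("bar"/"baz" vs. "foo", or the right name on the
// wrong group type) and expect an IllegalArgumentException saying the agg cannot be translated.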
public void testDateHistoMissingFieldInCaps() {
DateHistogramAggregationBuilder histo = new DateHistogramAggregationBuilder("test_histo");
histo.dateHistogramInterval(new DateHistogramInterval("1d"))
.field("foo")
.subAggregation(new MaxAggregationBuilder("the_max").field("max_field"))
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
List<QueryBuilder> filterConditions = new ArrayList<>();
RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setDateHisto(new DateHistoGroupConfig.Builder()
.setInterval(new DateHistogramInterval("1d"))
.setField("bar") // <-- NOTE different field from the one in the query
.setTimeZone(DateTimeZone.UTC)
.build())
.build())
.setMetricsConfig(Arrays.asList(new MetricConfig.Builder()
.setField("max_field")
.setMetrics(Collections.singletonList("max")).build(),
new MetricConfig.Builder()
.setField("avg_field")
.setMetrics(Collections.singletonList("avg")).build()))
.build();
List<RollupJobCaps> caps = Collections.singletonList(new RollupJobCaps(job));
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
() -> translateAggregation(histo, filterConditions, namedWriteableRegistry, caps));
assertThat(e.getMessage(), equalTo("There is not a [date_histogram] agg with name [foo] configured in selected rollup " +
"indices, cannot translate aggregation."));
}
public void testHistoMissingFieldInCaps() {
HistogramAggregationBuilder histo = new HistogramAggregationBuilder("test_histo");
histo.interval(1)
.field("foo")
.subAggregation(new MaxAggregationBuilder("the_max").field("max_field"))
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
List<QueryBuilder> filterConditions = new ArrayList<>();
RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setDateHisto(new DateHistoGroupConfig.Builder()
.setInterval(new DateHistogramInterval("1d"))
.setField("bar")
.setTimeZone(DateTimeZone.UTC)
.build())
.setHisto(new HistoGroupConfig.Builder()
.setFields(Collections.singletonList("baz")) // <-- NOTE note different field from one used in query
.setInterval(1L)
.build())
.build())
.setMetricsConfig(Arrays.asList(new MetricConfig.Builder()
.setField("max_field")
.setMetrics(Collections.singletonList("max")).build(),
new MetricConfig.Builder()
.setField("avg_field")
.setMetrics(Collections.singletonList("avg")).build()))
.build();
List<RollupJobCaps> caps = Collections.singletonList(new RollupJobCaps(job));
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
() -> translateAggregation(histo, filterConditions, namedWriteableRegistry, caps));
assertThat(e.getMessage(), equalTo("There is not a [histogram] agg with name [foo] configured in selected rollup " +
"indices, cannot translate aggregation."));
}
public void testHistoSameNameWrongTypeInCaps() {
HistogramAggregationBuilder histo = new HistogramAggregationBuilder("test_histo");
histo.field("foo")
.interval(1L)
.subAggregation(new MaxAggregationBuilder("the_max").field("max_field"))
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
List<QueryBuilder> filterConditions = new ArrayList<>();
RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setDateHisto(new DateHistoGroupConfig.Builder()
.setInterval(new DateHistogramInterval("1d"))
.setField("foo") // <-- NOTE same name but wrong type
.setTimeZone(DateTimeZone.UTC)
.build())
.setHisto(new HistoGroupConfig.Builder()
.setFields(Collections.singletonList("baz")) // <-- NOTE right type but wrong name
.setInterval(1L)
.build())
.build())
.setMetricsConfig(Arrays.asList(new MetricConfig.Builder()
.setField("max_field")
.setMetrics(Collections.singletonList("max")).build(),
new MetricConfig.Builder()
.setField("avg_field")
.setMetrics(Collections.singletonList("avg")).build()))
.build();
List<RollupJobCaps> caps = Collections.singletonList(new RollupJobCaps(job));
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
() -> translateAggregation(histo, filterConditions, namedWriteableRegistry, caps));
assertThat(e.getMessage(), equalTo("There is not a [histogram] agg with name [foo] configured in selected rollup " +
"indices, cannot translate aggregation."));
}
public void testSelectLowerHistoGranularityInterval() {
HistogramAggregationBuilder histo = new HistogramAggregationBuilder("test_histo");
histo.interval(3600000)
.field("bar")
.subAggregation(new MaxAggregationBuilder("the_max").field("max_field"))
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
List<QueryBuilder> filterConditions = new ArrayList<>();
RollupJobConfig job1 = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setDateHisto(new DateHistoGroupConfig.Builder()
.setInterval(new DateHistogramInterval("1d"))
.setField("foo")
.setTimeZone(DateTimeZone.UTC)
.build())
.setHisto(new HistoGroupConfig.Builder()
.setFields(Collections.singletonList("bar"))
.setInterval(1L)
.build())
.build())
.setMetricsConfig(Arrays.asList(new MetricConfig.Builder()
.setField("max_field")
.setMetrics(Collections.singletonList("max"))
.build(),
new MetricConfig.Builder()
.setField("avg_field")
.setMetrics(Collections.singletonList("avg"))
.build())
)
.build();
RollupJobConfig job2 = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setDateHisto(new DateHistoGroupConfig.Builder()
.setInterval(new DateHistogramInterval("1d"))
.setField("foo")
.setTimeZone(DateTimeZone.UTC)
.build())
.setHisto(new HistoGroupConfig.Builder()
.setFields(Collections.singletonList("bar"))
.setInterval(100L)
.build())
.build())
.setMetricsConfig(Arrays.asList(new MetricConfig.Builder()
.setField("max_field")
.setMetrics(Collections.singletonList("max"))
.build(),
new MetricConfig.Builder()
.setField("avg_field")
.setMetrics(Collections.singletonList("avg"))
.build())
)
.build();
List<RollupJobCaps> caps = new ArrayList<>(2);
caps.add(new RollupJobCaps(job1));
caps.add(new RollupJobCaps(job2));
List<AggregationBuilder> translated = translateAggregation(histo, filterConditions, namedWriteableRegistry, caps);
assertThat(translated.size(), equalTo(1));
assertThat(translated.get(0), instanceOf(HistogramAggregationBuilder.class));
HistogramAggregationBuilder translatedHisto = (HistogramAggregationBuilder)translated.get(0);
assertThat(translatedHisto.interval(), equalTo(3600000.0));
assertThat(translatedHisto.field(), equalTo("bar.histogram.value"));
assertThat(translatedHisto.getSubAggregations().size(), equalTo(4));
Map<String, AggregationBuilder> subAggs = translatedHisto.getSubAggregations()
.stream().collect(Collectors.toMap(AggregationBuilder::getName, Function.identity()));
assertThat(subAggs.get("the_max"), instanceOf(MaxAggregationBuilder.class));
assertThat(((MaxAggregationBuilder)subAggs.get("the_max")).field(), equalTo("max_field.max.value"));
assertThat(subAggs.get("the_avg"), instanceOf(SumAggregationBuilder.class));
SumAggregationBuilder avg = (SumAggregationBuilder)subAggs.get("the_avg");
assertThat(avg.field(), equalTo("avg_field.avg.value"));
assertThat(subAggs.get("the_avg._count"), instanceOf(SumAggregationBuilder.class));
assertThat(((SumAggregationBuilder)subAggs.get("the_avg._count")).field(),
equalTo("avg_field.avg._count"));
assertThat(subAggs.get("test_histo._count"), instanceOf(SumAggregationBuilder.class));
assertThat(((SumAggregationBuilder)subAggs.get("test_histo._count")).field(),
equalTo("bar.histogram._count"));
assertThat(filterConditions.size(), equalTo(3));
for (QueryBuilder q : filterConditions) {
if (q instanceof TermQueryBuilder) {
if (((TermQueryBuilder) q).fieldName().equals("bar.histogram.interval")) {
assertThat(((TermQueryBuilder) q).value().toString(), equalTo("1")); // <---- should be instead of 100
} else if (((TermQueryBuilder) q).fieldName().equals("_rollup.computed")) {
assertThat(((TermQueryBuilder) q).value(), equalTo("bar.histogram"));
} else if (((TermQueryBuilder) q).fieldName().equals("_rollup.id")) {
assertThat(((TermQueryBuilder) q).value(), equalTo("foo"));
} else {
fail("Unexpected Term Query in filter conditions: [" + ((TermQueryBuilder) q).fieldName() + "]");
}
} else {
fail("Unexpected query builder in filter conditions");
}
}
}
public void testNoMatchingHistoInterval() {
HistogramAggregationBuilder histo = new HistogramAggregationBuilder("test_histo");
histo.interval(1)
.field("bar")
.subAggregation(new MaxAggregationBuilder("the_max").field("max_field"))
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
List<QueryBuilder> filterConditions = new ArrayList<>();
RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setDateHisto(new DateHistoGroupConfig.Builder()
.setInterval(new DateHistogramInterval("1d"))
.setField("foo")
.setTimeZone(DateTimeZone.UTC)
.build())
.setHisto(new HistoGroupConfig.Builder()
.setFields(Collections.singletonList("bar"))
.setInterval(100L) // <--- interval in job is much higher than agg interval above
.build())
.build())
.build();
List<RollupJobCaps> caps = Collections.singletonList(new RollupJobCaps(job));
Exception e = expectThrows(RuntimeException.class,
() -> translateAggregation(histo, filterConditions, namedWriteableRegistry, caps));
assertThat(e.getMessage(), equalTo("Could not find a rolled histogram configuration that satisfies the interval [1.0]"));
private Set<RollupJobCaps> singletonSet(RollupJobCaps cap) {
Set<RollupJobCaps> caps = new HashSet<>();
caps.add(cap);
return caps;
}
}
@ -6,16 +6,20 @@
package org.elasticsearch.xpack.rollup;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.action.search.MultiSearchResponse;
@ -81,20 +85,16 @@ import static org.mockito.Mockito.when;
public class RollupResponseTranslationTests extends AggregatorTestCase {
public void testMSearchFailure() {
MultiSearchResponse.Item failure = new MultiSearchResponse.Item(null, new RuntimeException("foo"));
MultiSearchResponse.Item item = new MultiSearchResponse.Item(null, null);
public void testLiveFailure() {
MultiSearchResponse.Item[] failure = new MultiSearchResponse.Item[]{
new MultiSearchResponse.Item(null, new RuntimeException("foo")),
new MultiSearchResponse.Item(null, null)};
BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
ScriptService scriptService = mock(ScriptService.class);
Exception e = expectThrows(RuntimeException.class,
() -> RollupResponseTranslator.combineResponses(failure, item,
new InternalAggregation.ReduceContext(bigArrays, scriptService, true)));
assertThat(e.getMessage(), equalTo("foo"));
e = expectThrows(RuntimeException.class,
() -> RollupResponseTranslator.combineResponses(item, failure,
() -> RollupResponseTranslator.combineResponses(failure,
new InternalAggregation.ReduceContext(bigArrays, scriptService, true)));
assertThat(e.getMessage(), equalTo("foo"));
@ -104,13 +104,39 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
assertThat(e.getMessage(), equalTo("foo"));
e = expectThrows(RuntimeException.class,
() -> RollupResponseTranslator.verifyResponse(failure));
() -> RollupResponseTranslator.verifyResponse(failure[0]));
assertThat(e.getMessage(), equalTo("foo"));
}
public void testMissingLiveIndex() {
MultiSearchResponse.Item item = new MultiSearchResponse.Item(null, new IndexNotFoundException("foo"));
public void testRollupFailure() {
MultiSearchResponse.Item[] failure = new MultiSearchResponse.Item[]{
new MultiSearchResponse.Item(null, new IndexNotFoundException("live missing")),
new MultiSearchResponse.Item(null, new RuntimeException("rollup failure"))};
BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
ScriptService scriptService = mock(ScriptService.class);
Exception e = expectThrows(RuntimeException.class,
() -> RollupResponseTranslator.combineResponses(failure,
new InternalAggregation.ReduceContext(bigArrays, scriptService, true)));
assertThat(e.getMessage(), equalTo("rollup failure"));
}
public void testLiveMissingRollupMissing() {
MultiSearchResponse.Item[] failure = new MultiSearchResponse.Item[]{
new MultiSearchResponse.Item(null, new IndexNotFoundException("foo")),
new MultiSearchResponse.Item(null, new IndexNotFoundException("foo"))};
BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
ScriptService scriptService = mock(ScriptService.class);
Exception e = expectThrows(RuntimeException.class,
() -> RollupResponseTranslator.combineResponses(failure,
new InternalAggregation.ReduceContext(bigArrays, scriptService, true)));
assertThat(e.getMessage(), equalTo("No indices (live or rollup) found during rollup search"));
}
public void testMissingLiveIndex() {
SearchResponse responseWithout = mock(SearchResponse.class);
when(responseWithout.getTook()).thenReturn(new TimeValue(100));
List<InternalAggregation> aggTree = new ArrayList<>(1);
@ -141,12 +167,15 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
Aggregations mockAggsWithout = new InternalAggregations(aggTree);
when(responseWithout.getAggregations()).thenReturn(mockAggsWithout);
MultiSearchResponse.Item item2 = new MultiSearchResponse.Item(responseWithout, null);
MultiSearchResponse.Item[] msearch = new MultiSearchResponse.Item[]{
new MultiSearchResponse.Item(null, new IndexNotFoundException("foo")),
new MultiSearchResponse.Item(responseWithout, null)};
BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
ScriptService scriptService = mock(ScriptService.class);
SearchResponse response = RollupResponseTranslator.combineResponses(item, item2,
SearchResponse response = RollupResponseTranslator.combineResponses(msearch,
new InternalAggregation.ReduceContext(bigArrays, scriptService, true));
assertNotNull(response);
Aggregations responseAggs = response.getAggregations();
@ -156,33 +185,36 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
}
public void testRolledMissingAggs() {
MultiSearchResponse.Item item = new MultiSearchResponse.Item(null, new IndexNotFoundException("foo"));
SearchResponse responseWithout = mock(SearchResponse.class);
when(responseWithout.getTook()).thenReturn(new TimeValue(100));
Aggregations mockAggsWithout = new InternalAggregations(Collections.emptyList());
when(responseWithout.getAggregations()).thenReturn(mockAggsWithout);
MultiSearchResponse.Item item2 = new MultiSearchResponse.Item(responseWithout, null);
MultiSearchResponse.Item[] msearch = new MultiSearchResponse.Item[]{
new MultiSearchResponse.Item(null, new IndexNotFoundException("foo")),
new MultiSearchResponse.Item(responseWithout, null)};
BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
ScriptService scriptService = mock(ScriptService.class);
Exception e = expectThrows(RuntimeException.class,
() -> RollupResponseTranslator.combineResponses(item, item2,
() -> RollupResponseTranslator.combineResponses(msearch,
new InternalAggregation.ReduceContext(bigArrays, scriptService, true)));
assertThat(e.getMessage(), equalTo("Expected to find aggregations in rollup response, but none found."));
}
public void testMissingRolledIndex() {
SearchResponse response = mock(SearchResponse.class);
MultiSearchResponse.Item item = new MultiSearchResponse.Item(response, null);
MultiSearchResponse.Item item2 = new MultiSearchResponse.Item(null, new IndexNotFoundException("foo"));
MultiSearchResponse.Item[] msearch = new MultiSearchResponse.Item[]{
new MultiSearchResponse.Item(response, null),
new MultiSearchResponse.Item(null, new IndexNotFoundException("foo"))};
BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
ScriptService scriptService = mock(ScriptService.class);
SearchResponse finalResponse = RollupResponseTranslator.combineResponses(item, item2,
SearchResponse finalResponse = RollupResponseTranslator.combineResponses(msearch,
new InternalAggregation.ReduceContext(bigArrays, scriptService, true));
assertThat(finalResponse, equalTo(response));
}
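// Usage sketch, inferred from the tests above rather than copied from production code: the msearch
// array is laid out as [live response, rollup response(s)], e.g.
//   MultiSearchResponse.Item[] msearch = new MultiSearchResponse.Item[]{liveItem, rolledItem};
//   SearchResponse combined = RollupResponseTranslator.combineResponses(msearch, reduceContext);
// A missing live index still yields a combined (rollup-only) response, a missing rollup index falls
// back to the live response as-is, and both missing raises
// "No indices (live or rollup) found during rollup search".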
@ -239,7 +271,7 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
ScriptService scriptService = mock(ScriptService.class);
InternalAggregation.ReduceContext context = new InternalAggregation.ReduceContext(bigArrays, scriptService, true);
SearchResponse finalResponse = RollupResponseTranslator.translateResponse(item, context);
SearchResponse finalResponse = RollupResponseTranslator.translateResponse(new MultiSearchResponse.Item[]{item}, context);
assertNotNull(finalResponse);
Aggregations responseAggs = finalResponse.getAggregations();
assertNotNull(finalResponse);
@ -254,7 +286,7 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
InternalAggregation.ReduceContext context = new InternalAggregation.ReduceContext(bigArrays, scriptService, true);
Exception e = expectThrows(RuntimeException.class,
() -> RollupResponseTranslator.translateResponse(missing, context));
() -> RollupResponseTranslator.translateResponse(new MultiSearchResponse.Item[]{missing}, context));
assertThat(e.getMessage(), equalTo("no such index"));
}
@ -277,11 +309,13 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
when(responseWithout.getAggregations()).thenReturn(mockAggsWithout);
MultiSearchResponse.Item rolledResponse = new MultiSearchResponse.Item(responseWithout, null);
MultiSearchResponse.Item[] msearch = new MultiSearchResponse.Item[]{unrolledResponse, rolledResponse};
BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
ScriptService scriptService = mock(ScriptService.class);
Exception e = expectThrows(RuntimeException.class,
() -> RollupResponseTranslator.combineResponses(unrolledResponse, rolledResponse,
() -> RollupResponseTranslator.combineResponses(msearch,
new InternalAggregation.ReduceContext(bigArrays, scriptService, true)));
assertThat(e.getMessage(), containsString("Expected [bizzbuzz] to be a FilterAggregation"));
}
@ -304,11 +338,13 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
when(responseWithout.getAggregations()).thenReturn(mockAggsWithout);
MultiSearchResponse.Item rolledResponse = new MultiSearchResponse.Item(responseWithout, null);
MultiSearchResponse.Item[] msearch = new MultiSearchResponse.Item[]{unrolledResponse, rolledResponse};
BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
ScriptService scriptService = mock(ScriptService.class);
Exception e = expectThrows(RuntimeException.class,
() -> RollupResponseTranslator.combineResponses(unrolledResponse, rolledResponse,
() -> RollupResponseTranslator.combineResponses(msearch,
new InternalAggregation.ReduceContext(bigArrays, scriptService, true)));
assertThat(e.getMessage(),
equalTo("Expected [filter_foo] to be a FilterAggregation, but was [InternalMax]"));
@ -356,11 +392,13 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
when(responseWithout.getAggregations()).thenReturn(mockAggsWithout);
MultiSearchResponse.Item rolledResponse = new MultiSearchResponse.Item(responseWithout, null);
MultiSearchResponse.Item[] msearch = new MultiSearchResponse.Item[]{unrolledResponse, rolledResponse};
BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
ScriptService scriptService = mock(ScriptService.class);
SearchResponse response = RollupResponseTranslator.combineResponses(unrolledResponse, rolledResponse,
SearchResponse response = RollupResponseTranslator.combineResponses(msearch,
new InternalAggregation.ReduceContext(bigArrays, scriptService, true));
assertNotNull(response);
Aggregations responseAggs = response.getAggregations();
@ -392,7 +430,7 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
new MappedFieldType[]{fieldType}, new MappedFieldType[]{fieldType});
Exception e = expectThrows(RuntimeException.class,
() -> RollupResponseTranslator.unrollAgg(responses.get(1), null, 0));
() -> RollupResponseTranslator.unrollAgg(responses.get(1), null, null, 0));
assertThat(e.getMessage(), equalTo("Unable to unroll aggregation tree. " +
"Aggregation [foo] is of type [InternalGeoBounds] which is currently unsupported."));
}
@ -426,7 +464,7 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
Exception e = expectThrows(RuntimeException.class,
() -> RollupResponseTranslator.unrollAgg(responses.get(1), null, 0));
() -> RollupResponseTranslator.unrollAgg(responses.get(1), null, null, 0));
assertThat(e.getMessage(), equalTo("Unable to unroll aggregation tree. Aggregation [test] is of type " +
"[UnmappedSignificantTerms] which is currently unsupported."));
}
@ -465,12 +503,13 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
new InternalAggregations(Collections.singletonList(responses.get(1))), null, null, false, false, 1),
null, 1, 1, 0, 10, null, null), null);
MultiSearchResponse.Item[] msearch = new MultiSearchResponse.Item[]{unrolledItem, rolledItem};
BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
ScriptService scriptService = mock(ScriptService.class);
InternalAggregation.ReduceContext reduceContext = new InternalAggregation.ReduceContext(bigArrays, scriptService, true);
Exception e = expectThrows(RuntimeException.class,
() -> RollupResponseTranslator.combineResponses(
unrolledItem, rolledItem, reduceContext));
() -> RollupResponseTranslator.combineResponses(msearch, reduceContext));
assertThat(e.getMessage(), equalTo("org.elasticsearch.search.aggregations.metrics.geobounds.InternalGeoBounds " +
"cannot be cast to org.elasticsearch.search.aggregations.InternalMultiBucketAggregation"));
}
@ -480,10 +519,10 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
.field("timestamp").interval(100);
DateHistogramAggregationBuilder rollupHisto = new DateHistogramAggregationBuilder("histo")
.field("timestamp.date_histogram" + RollupField.TIMESTAMP)
.field("timestamp.date_histogram." + RollupField.TIMESTAMP)
.interval(100)
.subAggregation(new SumAggregationBuilder("histo." + RollupField.COUNT_FIELD)
.field("timestamp.date_histogram" + RollupField.COUNT_FIELD));
.field("timestamp.date_histogram." + RollupField.COUNT_FIELD));
DateFieldMapper.Builder nrBuilder = new DateFieldMapper.Builder("histo");
DateFieldMapper.DateFieldType nrFTtimestamp = nrBuilder.fieldType();
@ -499,7 +538,7 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
NumberFieldMapper.NumberType.LONG);
MappedFieldType rFTvalue = valueBuilder.fieldType();
rFTvalue.setHasDocValues(true);
rFTvalue.setName("timestamp.date_histogram" + RollupField.COUNT_FIELD);
rFTvalue.setName("timestamp.date_histogram." + RollupField.COUNT_FIELD);
List<InternalAggregation> responses = doQueries(new MatchAllDocsQuery(),
iw -> {
@ -514,11 +553,368 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
}, rollupHisto,
new MappedFieldType[]{nrFTtimestamp}, new MappedFieldType[]{rFTtimestamp, rFTvalue});
InternalAggregation unrolled = RollupResponseTranslator.unrollAgg(responses.get(1), null, 0);
InternalAggregation unrolled = RollupResponseTranslator.unrollAgg(responses.get(1), null, null, 0);
assertThat(unrolled.toString(), equalTo(responses.get(0).toString()));
assertThat(unrolled.toString(), not(equalTo(responses.get(1).toString())));
}
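// testDateHistoWithGap: the rolled-up docs skip key 300, so after unrolling and a doReduce the
// date_histogram (minDocCount 0) is expected to inject an empty 300 bucket with doc_count 0 and a
// zero "histo._count" alongside the real 100/200/400 buckets.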
public void testDateHistoWithGap() throws IOException {
DateHistogramAggregationBuilder nonRollupHisto = new DateHistogramAggregationBuilder("histo")
.field("timestamp").interval(100)
.minDocCount(0);
DateHistogramAggregationBuilder rollupHisto = new DateHistogramAggregationBuilder("histo")
.field("timestamp.date_histogram." + RollupField.TIMESTAMP)
.interval(100)
.minDocCount(0)
.subAggregation(new SumAggregationBuilder("histo." + RollupField.COUNT_FIELD)
.field("timestamp.date_histogram." + RollupField.COUNT_FIELD));
DateFieldMapper.Builder nrBuilder = new DateFieldMapper.Builder("histo");
DateFieldMapper.DateFieldType nrFTtimestamp = nrBuilder.fieldType();
nrFTtimestamp.setHasDocValues(true);
nrFTtimestamp.setName(nonRollupHisto.field());
DateFieldMapper.Builder rBuilder = new DateFieldMapper.Builder("histo");
DateFieldMapper.DateFieldType rFTtimestamp = rBuilder.fieldType();
rFTtimestamp.setHasDocValues(true);
rFTtimestamp.setName(rollupHisto.field());
NumberFieldMapper.Builder valueBuilder = new NumberFieldMapper.Builder("histo." + RollupField.COUNT_FIELD,
NumberFieldMapper.NumberType.LONG);
MappedFieldType rFTvalue = valueBuilder.fieldType();
rFTvalue.setHasDocValues(true);
rFTvalue.setName("timestamp.date_histogram." + RollupField.COUNT_FIELD);
List<InternalAggregation> responses = doQueries(new MatchAllDocsQuery(),
iw -> {
iw.addDocument(timestampedValueDoc(100, 1));
iw.addDocument(timestampedValueDoc(200, 2));
iw.addDocument(timestampedValueDoc(400, 3));
}, nonRollupHisto,
iw -> {
iw.addDocument(timestampedValueRollupDoc(100, 1));
iw.addDocument(timestampedValueRollupDoc(200, 2));
iw.addDocument(timestampedValueRollupDoc(400, 3));
}, rollupHisto,
new MappedFieldType[]{nrFTtimestamp}, new MappedFieldType[]{rFTtimestamp, rFTvalue});
InternalAggregation unrolled = RollupResponseTranslator.unrollAgg(responses.get(1), null, null, 0);
// Reduce the InternalDateHistogram response so we can fill buckets
BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
ScriptService scriptService = mock(ScriptService.class);
InternalAggregation.ReduceContext context = new InternalAggregation.ReduceContext(bigArrays, scriptService, true);
InternalAggregation reduced = ((InternalDateHistogram)unrolled).doReduce(Collections.singletonList(unrolled), context);
assertThat(reduced.toString(), equalTo("{\"histo\":{\"buckets\":[{\"key_as_string\":\"1970-01-01T00:00:00.100Z\",\"key\":100," +
"\"doc_count\":1},{\"key_as_string\":\"1970-01-01T00:00:00.200Z\",\"key\":200,\"doc_count\":1}," +
"{\"key_as_string\":\"1970-01-01T00:00:00.300Z\",\"key\":300,\"doc_count\":0,\"histo._count\":{\"value\":0.0}}," +
"{\"key_as_string\":\"1970-01-01T00:00:00.400Z\",\"key\":400,\"doc_count\":1}]}}"));
}
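// testNonMatchingPartition: both sides are filtered to partition "a" (the rollup side via the
// flattened "partition.terms." + RollupField.VALUE field), so only the "a" counts should survive
// unrolling: two documents at time 100 and one at time 200.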
public void testNonMatchingPartition() throws IOException {
DateHistogramAggregationBuilder nonRollupHisto = new DateHistogramAggregationBuilder("histo")
.field("timestamp").interval(100)
.minDocCount(0);
DateHistogramAggregationBuilder rollupHisto = new DateHistogramAggregationBuilder("histo")
.field("timestamp.date_histogram." + RollupField.TIMESTAMP)
.interval(100)
.minDocCount(0)
.subAggregation(new SumAggregationBuilder("histo." + RollupField.COUNT_FIELD)
.field("timestamp.date_histogram." + RollupField.COUNT_FIELD));
DateFieldMapper.Builder nrBuilder = new DateFieldMapper.Builder("histo");
DateFieldMapper.DateFieldType nrFTtimestamp = nrBuilder.fieldType();
nrFTtimestamp.setHasDocValues(true);
nrFTtimestamp.setName(nonRollupHisto.field());
DateFieldMapper.Builder rBuilder = new DateFieldMapper.Builder("histo");
DateFieldMapper.DateFieldType rFTtimestamp = rBuilder.fieldType();
rFTtimestamp.setHasDocValues(true);
rFTtimestamp.setName(rollupHisto.field());
NumberFieldMapper.Builder valueBuilder = new NumberFieldMapper.Builder("histo." + RollupField.COUNT_FIELD,
NumberFieldMapper.NumberType.LONG);
MappedFieldType rFTvalue = valueBuilder.fieldType();
rFTvalue.setHasDocValues(true);
rFTvalue.setName("timestamp.date_histogram." + RollupField.COUNT_FIELD);
KeywordFieldMapper.Builder nrKeywordBuilder = new KeywordFieldMapper.Builder("partition");
KeywordFieldMapper.KeywordFieldType nrKeywordFT = nrKeywordBuilder.fieldType();
nrKeywordFT.setHasDocValues(true);
nrKeywordFT.setName("partition");
KeywordFieldMapper.Builder rKeywordBuilder = new KeywordFieldMapper.Builder("partition");
KeywordFieldMapper.KeywordFieldType rKeywordFT = rKeywordBuilder.fieldType();
rKeywordFT.setHasDocValues(true);
rKeywordFT.setName("partition");
// Note: term query for "a"
List<InternalAggregation> results = new ArrayList<>(2);
results.add(doQuery(new TermQuery(new Term("partition", "a")), iw -> {
// Time 100: Two "a" documents, one "b" doc
Document doc = new Document();
doc.add(new SortedNumericDocValuesField("timestamp", 100));
doc.add(new TextField("partition", "a", Field.Store.NO));
iw.addDocument(doc);
doc = new Document();
doc.add(new SortedNumericDocValuesField("timestamp", 100));
doc.add(new TextField("partition", "a", Field.Store.NO));
iw.addDocument(doc);
doc = new Document();
doc.add(new SortedNumericDocValuesField("timestamp", 100));
doc.add(new TextField("partition", "b", Field.Store.NO));
iw.addDocument(doc);
// Time 200: one "a" document, one "b" doc
doc = new Document();
doc.add(new SortedNumericDocValuesField("timestamp", 200));
doc.add(new TextField("partition", "a", Field.Store.NO));
iw.addDocument(doc);
doc = new Document();
doc.add(new SortedNumericDocValuesField("timestamp", 200));
doc.add(new TextField("partition", "b", Field.Store.NO));
iw.addDocument(doc);
}, nonRollupHisto, new MappedFieldType[]{nrFTtimestamp, nrKeywordFT}));
// Note: term query for "a"
results.add(doQuery(new TermQuery(new Term("partition.terms." + RollupField.VALUE, "a")),
iw -> {
// Time 100: Two "a" documents, one "b" doc
Document doc = new Document();
doc.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.TIMESTAMP, 100));
doc.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.COUNT_FIELD, 2));
doc.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.INTERVAL, 1));
doc.add(new TextField("partition.terms." + RollupField.VALUE, "a", Field.Store.NO));
doc.add(new SortedNumericDocValuesField("partition.terms." + RollupField.COUNT_FIELD, 2));
iw.addDocument(doc);
doc = new Document();
doc.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.TIMESTAMP, 100));
doc.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.COUNT_FIELD, 1));
doc.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.INTERVAL, 1));
doc.add(new TextField("partition.terms." + RollupField.VALUE, "b", Field.Store.NO));
doc.add(new SortedNumericDocValuesField("partition.terms." + RollupField.COUNT_FIELD, 1));
iw.addDocument(doc);
// Time 200: one "a" document, one "b" doc
doc = new Document();
doc.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.TIMESTAMP, 200));
doc.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.COUNT_FIELD, 1));
doc.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.INTERVAL, 1));
doc.add(new TextField("partition.terms." + RollupField.VALUE, "a", Field.Store.NO));
doc.add(new SortedNumericDocValuesField("partition.terms." + RollupField.COUNT_FIELD, 1));
iw.addDocument(doc);
doc = new Document();
doc.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.TIMESTAMP, 200));
doc.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.COUNT_FIELD, 1));
doc.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.INTERVAL, 1));
doc.add(new TextField("partition.terms." + RollupField.VALUE, "b", Field.Store.NO));
doc.add(new SortedNumericDocValuesField("partition.terms." + RollupField.COUNT_FIELD, 1));
iw.addDocument(doc);
}, rollupHisto, new MappedFieldType[]{rFTtimestamp, rFTvalue, rKeywordFT}));
InternalAggregation unrolled = RollupResponseTranslator.unrollAgg(results.get(1), null, null, 0);
assertThat(((InternalDateHistogram)unrolled).getBuckets().size(), equalTo(2));
assertThat(((InternalDateHistogram)unrolled).getBuckets().get(0).getDocCount(), equalTo(2L)); // two "a" at 100
assertThat(((InternalDateHistogram)unrolled).getBuckets().get(1).getDocCount(), equalTo(1L)); // one "a" at 200
assertThat(((InternalDateHistogram)unrolled).getBuckets().get(0).getKeyAsString(), equalTo("1970-01-01T00:00:00.100Z"));
assertThat(unrolled.toString(), equalTo("{\"histo\":{\"buckets\":[{\"key_as_string\":\"1970-01-01T00:00:00.100Z\"," +
"\"key\":100,\"doc_count\":2},{\"key_as_string\":\"1970-01-01T00:00:00.200Z\",\"key\":200,\"doc_count\":1}]}}"));
assertThat(unrolled.toString(), not(equalTo(results.get(1).toString())));
}
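// The overlapping-tree tests below exercise the "currentTree" argument of unrollAgg: a bucket that
// already exists in the partially reduced working tree is re-emitted with a doc_count of 0, unless
// the existing bucket was itself a zero-count placeholder, in which case the real count wins.
// Presumably this keeps overlapping msearch responses from double-counting documents.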
public void testDateHistoOverlappingAggTrees() throws IOException {
DateHistogramAggregationBuilder nonRollupHisto = new DateHistogramAggregationBuilder("histo")
.field("timestamp").interval(100);
DateHistogramAggregationBuilder rollupHisto = new DateHistogramAggregationBuilder("histo")
.field("timestamp.date_histogram." + RollupField.TIMESTAMP)
.interval(100)
.subAggregation(new SumAggregationBuilder("histo." + RollupField.COUNT_FIELD)
.field("timestamp.date_histogram." + RollupField.COUNT_FIELD));
DateFieldMapper.Builder nrBuilder = new DateFieldMapper.Builder("histo");
DateFieldMapper.DateFieldType nrFTtimestamp = nrBuilder.fieldType();
nrFTtimestamp.setHasDocValues(true);
nrFTtimestamp.setName(nonRollupHisto.field());
DateFieldMapper.Builder rBuilder = new DateFieldMapper.Builder("histo");
DateFieldMapper.DateFieldType rFTtimestamp = rBuilder.fieldType();
rFTtimestamp.setHasDocValues(true);
rFTtimestamp.setName(rollupHisto.field());
NumberFieldMapper.Builder valueBuilder = new NumberFieldMapper.Builder("histo." + RollupField.COUNT_FIELD,
NumberFieldMapper.NumberType.LONG);
MappedFieldType rFTvalue = valueBuilder.fieldType();
rFTvalue.setHasDocValues(true);
rFTvalue.setName("timestamp.date_histogram." + RollupField.COUNT_FIELD);
List<InternalAggregation> responses = doQueries(new MatchAllDocsQuery(),
iw -> {
iw.addDocument(timestampedValueDoc(100, 1));
iw.addDocument(timestampedValueDoc(200, 2));
iw.addDocument(timestampedValueDoc(300, 3));
}, nonRollupHisto,
iw -> {
iw.addDocument(timestampedValueRollupDoc(100, 1));
iw.addDocument(timestampedValueRollupDoc(200, 2));
iw.addDocument(timestampedValueRollupDoc(300, 3));
}, rollupHisto,
new MappedFieldType[]{nrFTtimestamp}, new MappedFieldType[]{rFTtimestamp, rFTvalue});
List<InternalAggregation> currentTree = doQueries(new MatchAllDocsQuery(),
iw -> {
iw.addDocument(timestampedValueDoc(100, 1));
iw.addDocument(timestampedValueDoc(200, 2));
}, nonRollupHisto,
iw -> {
iw.addDocument(timestampedValueRollupDoc(100, 1));
iw.addDocument(timestampedValueRollupDoc(200, 2));
}, rollupHisto,
new MappedFieldType[]{nrFTtimestamp}, new MappedFieldType[]{rFTtimestamp, rFTvalue});
InternalAggregation unrolled = RollupResponseTranslator.unrollAgg(responses.get(1), null, currentTree.get(1), 0);
// Times 100/200 overlap with currentTree, so doc_count will be zero
assertThat(((Object[])unrolled.getProperty("_count"))[0], equalTo(0L));
assertThat(((Object[])unrolled.getProperty("_count"))[1], equalTo(0L));
// This time (300) was not in the currentTree so it will have a doc_count of one
assertThat(((Object[])unrolled.getProperty("_count"))[2], equalTo(1L));
}
public void testDateHistoOverlappingMergeRealIntoZero() throws IOException {
DateHistogramAggregationBuilder nonRollupHisto = new DateHistogramAggregationBuilder("histo")
.field("timestamp").interval(100);
DateHistogramAggregationBuilder rollupHisto = new DateHistogramAggregationBuilder("histo")
.field("timestamp.date_histogram." + RollupField.TIMESTAMP)
.interval(100)
.subAggregation(new SumAggregationBuilder("histo." + RollupField.COUNT_FIELD)
.field("timestamp.date_histogram." + RollupField.COUNT_FIELD));
DateFieldMapper.Builder nrBuilder = new DateFieldMapper.Builder("histo");
DateFieldMapper.DateFieldType nrFTtimestamp = nrBuilder.fieldType();
nrFTtimestamp.setHasDocValues(true);
nrFTtimestamp.setName(nonRollupHisto.field());
DateFieldMapper.Builder rBuilder = new DateFieldMapper.Builder("histo");
DateFieldMapper.DateFieldType rFTtimestamp = rBuilder.fieldType();
rFTtimestamp.setHasDocValues(true);
rFTtimestamp.setName(rollupHisto.field());
NumberFieldMapper.Builder valueBuilder = new NumberFieldMapper.Builder("histo." + RollupField.COUNT_FIELD,
NumberFieldMapper.NumberType.LONG);
MappedFieldType rFTvalue = valueBuilder.fieldType();
rFTvalue.setHasDocValues(true);
rFTvalue.setName("timestamp.date_histogram." + RollupField.COUNT_FIELD);
List<InternalAggregation> responses = doQueries(new MatchAllDocsQuery(),
iw -> {
iw.addDocument(timestampedValueDoc(100, 1));
iw.addDocument(timestampedValueDoc(200, 2));
iw.addDocument(timestampedValueDoc(300, 3));
}, nonRollupHisto,
iw -> {
iw.addDocument(timestampedValueRollupDoc(100, 1));
iw.addDocument(timestampedValueRollupDoc(200, 2));
iw.addDocument(timestampedValueRollupDoc(300, 3));
}, rollupHisto,
new MappedFieldType[]{nrFTtimestamp}, new MappedFieldType[]{rFTtimestamp, rFTvalue});
InternalAggregation currentTree = doQuery(new MatchAllDocsQuery(),
iw -> {
Document doc = new Document();
doc.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.TIMESTAMP, 100));
doc.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.COUNT_FIELD, 0));
doc.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.INTERVAL, 1));
iw.addDocument(doc);
Document doc2 = new Document();
doc2.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.TIMESTAMP, 200));
doc2.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.COUNT_FIELD, 0));
doc2.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.INTERVAL, 1));
iw.addDocument(doc2);
}, rollupHisto, new MappedFieldType[]{rFTtimestamp, rFTvalue});
// In this test we merge real buckets into zero-count buckets (i.e. a working tree that unrolls to an empty list of buckets)
InternalAggregation unrolledCurrentTree = RollupResponseTranslator.unrollAgg(currentTree, null, null, 0);
InternalAggregation unrolled = RollupResponseTranslator.unrollAgg(responses.get(1), null, unrolledCurrentTree, 0);
// Times 100/200 overlap with currentTree, but doc_count was zero, so returned doc_count should be one
assertThat(((Object[])unrolled.getProperty("_count"))[0], equalTo(1L));
assertThat(((Object[])unrolled.getProperty("_count"))[1], equalTo(1L));
// This time (300) was not in the currentTree so it will have a doc_count of one
assertThat(((Object[])unrolled.getProperty("_count"))[2], equalTo(1L));
}
public void testDateHistoOverlappingMergeZeroIntoReal() throws IOException {
DateHistogramAggregationBuilder nonRollupHisto = new DateHistogramAggregationBuilder("histo")
.field("timestamp").interval(100).minDocCount(0);
DateHistogramAggregationBuilder rollupHisto = new DateHistogramAggregationBuilder("histo")
.field("timestamp.date_histogram." + RollupField.TIMESTAMP)
.interval(100)
.minDocCount(0)
.subAggregation(new SumAggregationBuilder("histo." + RollupField.COUNT_FIELD)
.field("timestamp.date_histogram." + RollupField.COUNT_FIELD));
DateFieldMapper.Builder nrBuilder = new DateFieldMapper.Builder("histo");
DateFieldMapper.DateFieldType nrFTtimestamp = nrBuilder.fieldType();
nrFTtimestamp.setHasDocValues(true);
nrFTtimestamp.setName(nonRollupHisto.field());
DateFieldMapper.Builder rBuilder = new DateFieldMapper.Builder("histo");
DateFieldMapper.DateFieldType rFTtimestamp = rBuilder.fieldType();
rFTtimestamp.setHasDocValues(true);
rFTtimestamp.setName(rollupHisto.field());
NumberFieldMapper.Builder valueBuilder = new NumberFieldMapper.Builder("histo." + RollupField.COUNT_FIELD,
NumberFieldMapper.NumberType.LONG);
MappedFieldType rFTvalue = valueBuilder.fieldType();
rFTvalue.setHasDocValues(true);
rFTvalue.setName("timestamp.date_histogram." + RollupField.COUNT_FIELD);
InternalAggregation currentTree = doQuery(new MatchAllDocsQuery(),
iw -> {
iw.addDocument(timestampedValueRollupDoc(100, 1));
iw.addDocument(timestampedValueRollupDoc(200, 2));
iw.addDocument(timestampedValueRollupDoc(300, 3));
}, rollupHisto, new MappedFieldType[]{rFTtimestamp, rFTvalue});
InternalAggregation responses = doQuery(new MatchAllDocsQuery(),
iw -> {
Document doc = new Document();
doc.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.TIMESTAMP, 100));
doc.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.COUNT_FIELD, 0));
doc.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.INTERVAL, 1));
iw.addDocument(doc);
Document doc2 = new Document();
doc2.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.TIMESTAMP, 200));
doc2.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.COUNT_FIELD, 0));
doc2.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.INTERVAL, 1));
iw.addDocument(doc2);
}, rollupHisto, new MappedFieldType[]{rFTtimestamp, rFTvalue});
// In this test, we merge zero-count buckets into existing buckets to ensure the existing metrics are preserved
InternalAggregation unrolledCurrentTree = RollupResponseTranslator.unrollAgg(currentTree, null, null, 0);
InternalAggregation unrolled = RollupResponseTranslator.unrollAgg(responses, null, unrolledCurrentTree, 0);
// All values overlap and were zero counts themselves, so the unrolled response should be an empty list of buckets
assertThat(((InternalDateHistogram)unrolled).getBuckets().size(), equalTo(0));
}
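// Note on the updated unrollAgg signature used throughout these tests: it now takes
// (rolledAgg, originalAgg, currentTree, count), where the extra third argument carries the
// partially built working tree so overlaps can be detected, and null means "no working tree yet".
// The parameter names here are descriptive guesses, not necessarily the source's exact names.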
public void testAvg() throws IOException {
AvgAggregationBuilder nonRollup = new AvgAggregationBuilder("avg")
.field("foo");
@ -551,7 +947,7 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
// NOTE: we manually set the count to 3 here, which is somewhat cheating. Will have to rely on
// other tests to verify that the avg's count is set correctly
InternalAggregation unrolled = RollupResponseTranslator.unrollAgg(responses.get(1), null, 3);
InternalAggregation unrolled = RollupResponseTranslator.unrollAgg(responses.get(1), null, null, 3);
assertThat(unrolled.toString(), equalTo(responses.get(0).toString()));
assertThat(unrolled.toString(), not(equalTo(responses.get(1).toString())));
}
@ -605,7 +1001,7 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
}, rollup,
new MappedFieldType[]{nrFTvalue}, new MappedFieldType[]{rFTvalue});
InternalAggregation unrolled = RollupResponseTranslator.unrollAgg(responses.get(1), null, 1);
InternalAggregation unrolled = RollupResponseTranslator.unrollAgg(responses.get(1), null, null, 1);
assertThat(unrolled.toString(), equalTo(responses.get(0).toString()));
}
@ -639,7 +1035,8 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
}, rollup,
new MappedFieldType[]{nrFTvalue}, new MappedFieldType[]{rFTvalue});
RuntimeException e = expectThrows(RuntimeException.class, () -> RollupResponseTranslator.unrollAgg(responses.get(1), null, 1));
RuntimeException e = expectThrows(RuntimeException.class,
() -> RollupResponseTranslator.unrollAgg(responses.get(1), null, null, 1));
assertThat(e.getMessage(), equalTo("Unable to unroll metric. Aggregation [test_metric] is of type " +
"[InternalCardinality] which is currently unsupported."));
}
@ -680,7 +1077,7 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
}, rollupTerms,
new MappedFieldType[]{nrFTterm}, new MappedFieldType[]{rFTterm, rFTvalue});
InternalAggregation unrolled = RollupResponseTranslator.unrollAgg(responses.get(1), null, 0);
InternalAggregation unrolled = RollupResponseTranslator.unrollAgg(responses.get(1), null, null, 0);
assertThat(unrolled.toString(), equalTo(responses.get(0).toString()));
assertThat(unrolled.toString(), not(equalTo(responses.get(1).toString())));
}
@ -721,7 +1118,7 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
}, rollupTerms,
new MappedFieldType[]{nrFTterm}, new MappedFieldType[]{rFTterm, rFTvalue});
InternalAggregation unrolled = RollupResponseTranslator.unrollAgg(responses.get(1), null, 0);
InternalAggregation unrolled = RollupResponseTranslator.unrollAgg(responses.get(1), null, null, 0);
assertThat(unrolled.toString(), equalTo(responses.get(0).toString()));
assertThat(unrolled.toString(), not(equalTo(responses.get(1).toString())));
}
@ -765,7 +1162,7 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
}, rollupHisto,
new MappedFieldType[]{nrFTbar}, new MappedFieldType[]{rFTbar, rFTvalue});
InternalAggregation unrolled = RollupResponseTranslator.unrollAgg(responses.get(1), null, 0);
InternalAggregation unrolled = RollupResponseTranslator.unrollAgg(responses.get(1), null, null, 0);
assertThat(unrolled.toString(), equalTo(responses.get(0).toString()));
assertThat(unrolled.toString(), not(equalTo(responses.get(1).toString())));
}
@ -775,10 +1172,10 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
.field("timestamp").interval(100);
DateHistogramAggregationBuilder rollupHisto = new DateHistogramAggregationBuilder("histo")
.field("timestamp.date_histogram" + RollupField.TIMESTAMP)
.field("timestamp.date_histogram." + RollupField.TIMESTAMP)
.interval(100)
.subAggregation(new SumAggregationBuilder("histo." + RollupField.COUNT_FIELD)
.field("timestamp.date_histogram" + RollupField.COUNT_FIELD));
.field("timestamp.date_histogram." + RollupField.COUNT_FIELD));
DateFieldMapper.Builder nrBuilder = new DateFieldMapper.Builder("histo");
DateFieldMapper.DateFieldType nrFTtimestamp = nrBuilder.fieldType();
@ -794,7 +1191,7 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
NumberFieldMapper.NumberType.LONG);
MappedFieldType rFTvalue = valueBuilder.fieldType();
rFTvalue.setHasDocValues(true);
rFTvalue.setName("timestamp.date_histogram" + RollupField.COUNT_FIELD);
rFTvalue.setName("timestamp.date_histogram." + RollupField.COUNT_FIELD);
List<InternalAggregation> responses = doQueries(new MatchAllDocsQuery(),
iw -> {
@ -810,7 +1207,7 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
}, rollupHisto,
new MappedFieldType[]{nrFTtimestamp}, new MappedFieldType[]{rFTtimestamp, rFTvalue});
InternalAggregation unrolled = RollupResponseTranslator.unrollAgg(responses.get(1), responses.get(0), 0);
InternalAggregation unrolled = RollupResponseTranslator.unrollAgg(responses.get(1), responses.get(0), null, 0);
assertThat(((InternalDateHistogram)unrolled).getBuckets().size(), equalTo(1));
assertThat(((InternalDateHistogram)unrolled).getBuckets().get(0).getDocCount(), equalTo(1L));
assertThat(((InternalDateHistogram)unrolled).getBuckets().get(0).getKeyAsString(), equalTo("1970-01-01T00:00:00.400Z"));
@ -829,9 +1226,9 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
private Document timestampedValueRollupDoc(long timestamp, long value) {
Document doc = new Document();
doc.add(new SortedNumericDocValuesField("timestamp.date_histogram" + RollupField.TIMESTAMP, timestamp));
doc.add(new SortedNumericDocValuesField("timestamp.date_histogram" + RollupField.COUNT_FIELD, 1));
doc.add(new SortedNumericDocValuesField("timestamp.date_histogram" + RollupField.INTERVAL, 1));
doc.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.TIMESTAMP, timestamp));
doc.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.COUNT_FIELD, 1));
doc.add(new SortedNumericDocValuesField("timestamp.date_histogram." + RollupField.INTERVAL, 1));
doc.add(new SortedNumericDocValuesField("foo.avg." + RollupField.VALUE, value));
doc.add(new SortedNumericDocValuesField("foo.avg." + RollupField.COUNT_FIELD, 3));
doc.add(new SortedNumericDocValuesField("foo.min." + RollupField.VALUE, value));

View File

@ -7,9 +7,9 @@ package org.elasticsearch.xpack.rollup.action;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers;
import org.elasticsearch.xpack.core.rollup.action.RollupJobCaps;
import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig;
import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers;
import java.util.ArrayList;
import java.util.Collections;

View File

@ -28,9 +28,11 @@ import org.elasticsearch.index.query.MatchPhraseQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.RangeQueryBuilder;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregations;
@ -52,6 +54,7 @@ import org.elasticsearch.xpack.core.rollup.action.RollupJobCaps;
import org.elasticsearch.xpack.core.rollup.job.DateHistoGroupConfig;
import org.elasticsearch.xpack.core.rollup.job.GroupConfig;
import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig;
import org.elasticsearch.xpack.rollup.Rollup;
import org.hamcrest.core.IsEqual;
import org.joda.time.DateTimeZone;
import org.mockito.Mockito;
@ -60,20 +63,35 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import static org.elasticsearch.xpack.core.rollup.RollupField.COUNT_FIELD;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.mockito.Mockito.mock;
import static org.hamcrest.Matchers.equalTo;
import static org.mockito.Mockito.when;
public class SearchActionTests extends ESTestCase {
private NamedWriteableRegistry namedWriteableRegistry;
public void setUp() throws Exception {
super.setUp();
IndicesModule indicesModule = new IndicesModule(Collections.emptyList());
SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList());
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
entries.addAll(indicesModule.getNamedWriteables());
entries.addAll(searchModule.getNamedWriteables());
namedWriteableRegistry = new NamedWriteableRegistry(entries);
}
public void testNonZeroSize() {
String[] normalIndices = new String[]{randomAlphaOfLength(10)};
String[] rollupIndices = new String[]{randomAlphaOfLength(10)};
TransportRollupSearchAction.RollupSearchContext ctx
= new TransportRollupSearchAction.RollupSearchContext(normalIndices, rollupIndices, Collections.emptySet());
SearchSourceBuilder source = new SearchSourceBuilder();
source.query(new MatchAllQueryBuilder());
source.size(100);
@ -81,37 +99,28 @@ public class SearchActionTests extends ESTestCase {
SearchRequest request = new SearchRequest(normalIndices, source);
NamedWriteableRegistry registry = mock(NamedWriteableRegistry.class);
Exception e = expectThrows(IllegalArgumentException.class,
() -> TransportRollupSearchAction.createMSearchRequest(request, registry,
normalIndices, rollupIndices, Collections.emptyList()));
() -> TransportRollupSearchAction.createMSearchRequest(request, registry, ctx));
assertThat(e.getMessage(), equalTo("Rollup does not support returning search hits, please try again with [size: 0]."));
}
public void testBadQuery() {
String[] normalIndices = new String[]{randomAlphaOfLength(10)};
String[] rollupIndices = new String[]{randomAlphaOfLength(10)};
SearchSourceBuilder source = new SearchSourceBuilder();
source.query(new MatchPhraseQueryBuilder("foo", "bar"));
source.aggregation(new DateHistogramAggregationBuilder("foo").field("foo").interval(123));
source.size(0);
SearchRequest request = new SearchRequest(normalIndices, source);
NamedWriteableRegistry registry = mock(NamedWriteableRegistry.class);
Exception e = expectThrows(IllegalArgumentException.class,
() -> TransportRollupSearchAction.createMSearchRequest(request, registry,
normalIndices, rollupIndices, Collections.emptyList()));
assertThat(e.getMessage(), equalTo("Unsupported Query in search request: [match_phrase]"));
e = expectThrows(IllegalArgumentException.class,
() -> TransportRollupSearchAction.rewriteQuery(new MatchPhraseQueryBuilder("foo", "bar"), Collections.emptyList()));
() -> TransportRollupSearchAction.rewriteQuery(new MatchPhraseQueryBuilder("foo", "bar"), Collections.emptySet()));
assertThat(e.getMessage(), equalTo("Unsupported Query in search request: [match_phrase]"));
}
public void testRange() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(ConfigTestHelpers.getDateHisto().setField("foo").build());
group.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build());
job.setGroupConfig(group.build());
RollupJobCaps cap = new RollupJobCaps(job.build());
List<RollupJobCaps> caps = Collections.singletonList(cap);
Set<RollupJobCaps> caps = new HashSet<>();
caps.add(cap);
QueryBuilder rewritten = null;
try {
rewritten = TransportRollupSearchAction.rewriteQuery(new RangeQueryBuilder("foo").gt(1), caps);
@ -119,7 +128,7 @@ public class SearchActionTests extends ESTestCase {
fail("Should not have thrown exception when parsing query.");
}
assertThat(rewritten, instanceOf(RangeQueryBuilder.class));
assertThat(((RangeQueryBuilder)rewritten).fieldName(), equalTo("foo.date_histogram.value"));
assertThat(((RangeQueryBuilder)rewritten).fieldName(), equalTo("foo.date_histogram.timestamp"));
}
public void testTerms() {
@ -128,7 +137,8 @@ public class SearchActionTests extends ESTestCase {
group.setTerms(ConfigTestHelpers.getTerms().setFields(Collections.singletonList("foo")).build());
job.setGroupConfig(group.build());
RollupJobCaps cap = new RollupJobCaps(job.build());
List<RollupJobCaps> caps = Collections.singletonList(cap);
Set<RollupJobCaps> caps = new HashSet<>();
caps.add(cap);
QueryBuilder rewritten = null;
try {
rewritten = TransportRollupSearchAction.rewriteQuery(new TermQueryBuilder("foo", "bar"), caps);
@ -142,10 +152,11 @@ public class SearchActionTests extends ESTestCase {
public void testCompounds() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(ConfigTestHelpers.getDateHisto().setField("foo").build());
group.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build());
job.setGroupConfig(group.build());
RollupJobCaps cap = new RollupJobCaps(job.build());
List<RollupJobCaps> caps = Collections.singletonList(cap);
Set<RollupJobCaps> caps = new HashSet<>();
caps.add(cap);
BoolQueryBuilder builder = new BoolQueryBuilder();
builder.must(getQueryBuilder(2));
@ -162,10 +173,11 @@ public class SearchActionTests extends ESTestCase {
public void testMatchAll() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(ConfigTestHelpers.getDateHisto().setField("foo").build());
group.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build());
job.setGroupConfig(group.build());
RollupJobCaps cap = new RollupJobCaps(job.build());
List<RollupJobCaps> caps = Collections.singletonList(cap);
Set<RollupJobCaps> caps = new HashSet<>();
caps.add(cap);
try {
QueryBuilder rewritten = TransportRollupSearchAction.rewriteQuery(new MatchAllQueryBuilder(), caps);
assertThat(rewritten, instanceOf(MatchAllQueryBuilder.class));
@ -177,18 +189,19 @@ public class SearchActionTests extends ESTestCase {
public void testAmbiguousResolution() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(ConfigTestHelpers.getDateHisto().setField("foo").build());
group.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build());
group.setTerms(ConfigTestHelpers.getTerms().setFields(Collections.singletonList("foo")).build()).build();
job.setGroupConfig(group.build());
RollupJobCaps cap = new RollupJobCaps(job.build());
List<RollupJobCaps> caps = Collections.singletonList(cap);
Set<RollupJobCaps> caps = new HashSet<>();
caps.add(cap);
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
() -> TransportRollupSearchAction.rewriteQuery(new RangeQueryBuilder("foo").gt(1), caps));
assertThat(e.getMessage(), equalTo("Ambiguous field name resolution when mapping to rolled fields. " +
"Field name [foo] was mapped to: [foo.date_histogram.value,foo.terms.value]."));
"Field name [foo] was mapped to: [foo.date_histogram.timestamp,foo.terms.value]."));
}
private QueryBuilder getQueryBuilder(int levels) {
public static QueryBuilder getQueryBuilder(int levels) {
if (levels == 0) {
return ESTestCase.randomBoolean() ? new MatchAllQueryBuilder() : new RangeQueryBuilder("foo").gt(1);
}
@ -222,6 +235,8 @@ public class SearchActionTests extends ESTestCase {
public void testPostFilter() {
String[] normalIndices = new String[]{randomAlphaOfLength(10)};
String[] rollupIndices = new String[]{randomAlphaOfLength(10)};
TransportRollupSearchAction.RollupSearchContext ctx
= new TransportRollupSearchAction.RollupSearchContext(normalIndices, rollupIndices, Collections.emptySet());
SearchSourceBuilder source = new SearchSourceBuilder();
source.aggregation(new DateHistogramAggregationBuilder("foo").field("foo").interval(123));
source.postFilter(new TermQueryBuilder("foo", "bar"));
@ -229,8 +244,7 @@ public class SearchActionTests extends ESTestCase {
SearchRequest request = new SearchRequest(normalIndices, source);
NamedWriteableRegistry registry = mock(NamedWriteableRegistry.class);
Exception e = expectThrows(IllegalArgumentException.class,
() -> TransportRollupSearchAction.createMSearchRequest(request, registry,
normalIndices, rollupIndices, Collections.emptyList()));
() -> TransportRollupSearchAction.createMSearchRequest(request, registry, ctx));
assertThat(e.getMessage(), equalTo("Rollup search does not support post filtering."));
}
@ -281,39 +295,71 @@ public class SearchActionTests extends ESTestCase {
public void testNoAgg() {
String[] normalIndices = new String[]{randomAlphaOfLength(10)};
String[] rollupIndices = new String[]{randomAlphaOfLength(10)};
TransportRollupSearchAction.RollupSearchContext ctx
= new TransportRollupSearchAction.RollupSearchContext(normalIndices, rollupIndices, Collections.emptySet());
SearchSourceBuilder source = new SearchSourceBuilder();
source.query(new MatchAllQueryBuilder());
source.size(0);
SearchRequest request = new SearchRequest(normalIndices, source);
NamedWriteableRegistry registry = mock(NamedWriteableRegistry.class);
Exception e = expectThrows(IllegalArgumentException.class,
() -> TransportRollupSearchAction.createMSearchRequest(request, registry,
normalIndices, rollupIndices, Collections.emptyList()));
() -> TransportRollupSearchAction.createMSearchRequest(request, registry, ctx));
assertThat(e.getMessage(), equalTo("Rollup requires at least one aggregation to be set."));
}
public void testNoLiveNoRollup() {
String[] normalIndices = new String[0];
String[] rollupIndices = new String[0];
TransportRollupSearchAction.RollupSearchContext ctx
= new TransportRollupSearchAction.RollupSearchContext(normalIndices, rollupIndices, Collections.emptySet());
SearchSourceBuilder source = new SearchSourceBuilder();
source.query(new MatchAllQueryBuilder());
source.size(0);
SearchRequest request = new SearchRequest(normalIndices, source);
NamedWriteableRegistry registry = mock(NamedWriteableRegistry.class);
Exception e = expectThrows(IllegalArgumentException.class,
() -> TransportRollupSearchAction.createMSearchRequest(request, registry, ctx));
assertThat(e.getMessage(), equalTo("Must specify at least one rollup index in _rollup_search API"));
}
public void testLiveOnlyCreateMSearch() {
String[] normalIndices = new String[]{randomAlphaOfLength(10)};
String[] rollupIndices = new String[0];
TransportRollupSearchAction.RollupSearchContext ctx
= new TransportRollupSearchAction.RollupSearchContext(normalIndices, rollupIndices, Collections.emptySet());
SearchSourceBuilder source = new SearchSourceBuilder();
source.query(new MatchAllQueryBuilder());
source.size(0);
SearchRequest request = new SearchRequest(normalIndices, source);
NamedWriteableRegistry registry = mock(NamedWriteableRegistry.class);
MultiSearchRequest msearch = TransportRollupSearchAction.createMSearchRequest(request, registry, ctx);
assertThat(msearch.requests().size(), equalTo(1));
assertThat(msearch.requests().get(0), equalTo(request));
}
public void testGood() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(ConfigTestHelpers.getDateHisto().setField("foo").build());
group.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build());
job.setGroupConfig(group.build());
RollupJobCaps cap = new RollupJobCaps(job.build());
List<RollupJobCaps> caps = Collections.singletonList(cap);
Set<RollupJobCaps> caps = singletonSet(cap);
String[] normalIndices = new String[]{ESTestCase.randomAlphaOfLength(10)};
String[] rollupIndices = new String[]{ESTestCase.randomAlphaOfLength(10)};
String[] combinedIndices = new String[]{normalIndices[0], rollupIndices[0]};
TransportRollupSearchAction.RollupSearchContext ctx
= new TransportRollupSearchAction.RollupSearchContext(normalIndices, rollupIndices, caps);
SearchSourceBuilder source = new SearchSourceBuilder();
source.query(getQueryBuilder(1));
source.size(0);
source.aggregation(new DateHistogramAggregationBuilder("foo").field("foo")
.dateHistogramInterval(job.getGroupConfig().getDateHisto().getInterval()));
SearchRequest request = new SearchRequest(combinedIndices, source);
NamedWriteableRegistry registry = mock(NamedWriteableRegistry.class);
MultiSearchRequest msearch = TransportRollupSearchAction.createMSearchRequest(request, registry,
normalIndices, rollupIndices, caps);
MultiSearchRequest msearch = TransportRollupSearchAction.createMSearchRequest(request, namedWriteableRegistry, ctx);
assertThat(msearch.requests().size(), equalTo(2));
assertThat(msearch.requests().get(0), equalTo(new SearchRequest(normalIndices, request.source())));
@ -337,7 +383,6 @@ public class SearchActionTests extends ESTestCase {
source.size(0);
source.aggregation(new DateHistogramAggregationBuilder("foo").field("foo").dateHistogramInterval(new DateHistogramInterval("1d")));
SearchRequest request = new SearchRequest(combinedIndices, source);
NamedWriteableRegistry registry = mock(NamedWriteableRegistry.class);
RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
@ -348,10 +393,12 @@ public class SearchActionTests extends ESTestCase {
.build())
.build())
.build();
List<RollupJobCaps> caps = Collections.singletonList(new RollupJobCaps(job));
Set<RollupJobCaps> caps = singletonSet(new RollupJobCaps(job));
MultiSearchRequest msearch = TransportRollupSearchAction.createMSearchRequest(request, registry,
normalIndices, rollupIndices, caps);
TransportRollupSearchAction.RollupSearchContext ctx
= new TransportRollupSearchAction.RollupSearchContext(normalIndices, rollupIndices, caps);
MultiSearchRequest msearch = TransportRollupSearchAction.createMSearchRequest(request, namedWriteableRegistry, ctx);
assertThat(msearch.requests().size(), equalTo(2));
assertThat(msearch.requests().get(0), equalTo(new SearchRequest(normalIndices, request.source())));
@ -365,6 +412,114 @@ public class SearchActionTests extends ESTestCase {
assert(rollup.source().aggregations().getAggregatorFactories().get(0) instanceof FilterAggregationBuilder);
}
public void testTwoMatchingJobs() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build())
.setHisto(null)
.setTerms(null);
job.setGroupConfig(group.build());
RollupJobCaps cap = new RollupJobCaps(job.build());
RollupJobConfig.Builder job2 = ConfigTestHelpers.getRollupJob("foo2").setRollupIndex(job.getRollupIndex());
job2.setGroupConfig(group.build());
// so that the jobs aren't exactly equal
job2.setMetricsConfig(Collections.singletonList(ConfigTestHelpers.getMetricConfig().build()));
RollupJobCaps cap2 = new RollupJobCaps(job2.build());
Set<RollupJobCaps> caps = new HashSet<>(2);
caps.add(cap);
caps.add(cap2);
String[] normalIndices = new String[]{ESTestCase.randomAlphaOfLength(10)};
String[] rollupIndices = new String[]{ESTestCase.randomAlphaOfLength(10)};
String[] combinedIndices = new String[]{normalIndices[0], rollupIndices[0]};
TransportRollupSearchAction.RollupSearchContext ctx
= new TransportRollupSearchAction.RollupSearchContext(normalIndices, rollupIndices, caps);
SearchSourceBuilder source = new SearchSourceBuilder();
source.query(getQueryBuilder(1));
source.size(0);
source.aggregation(new DateHistogramAggregationBuilder("foo").field("foo")
.dateHistogramInterval(job.getGroupConfig().getDateHisto().getInterval()));
SearchRequest request = new SearchRequest(combinedIndices, source);
MultiSearchRequest msearch = TransportRollupSearchAction.createMSearchRequest(request, namedWriteableRegistry, ctx);
assertThat(msearch.requests().size(), equalTo(2));
assertThat(msearch.requests().get(0), equalTo(new SearchRequest(normalIndices, request.source())));
SearchRequest normal = msearch.requests().get(0);
assertThat(normal.indices().length, equalTo(1));
assertThat(normal.indices()[0], equalTo(normalIndices[0]));
SearchRequest rollup = msearch.requests().get(1);
assertThat(rollup.indices().length, equalTo(1));
assertThat(rollup.indices()[0], equalTo(rollupIndices[0]));
assert(rollup.source().aggregations().getAggregatorFactories().get(0) instanceof FilterAggregationBuilder);
assertThat(msearch.requests().size(), equalTo(2));
}
public void testTwoMatchingJobsOneBetter() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build())
.setHisto(null)
.setTerms(null);
job.setGroupConfig(group.build());
RollupJobCaps cap = new RollupJobCaps(job.build());
RollupJobConfig.Builder job2 = ConfigTestHelpers.getRollupJob("foo2").setRollupIndex(job.getRollupIndex());
GroupConfig.Builder group2 = ConfigTestHelpers.getGroupConfig();
group2.setDateHisto(group.getDateHisto())
.setHisto(ConfigTestHelpers.getHisto().build())
.setTerms(null);
job2.setGroupConfig(group2.build());
RollupJobCaps cap2 = new RollupJobCaps(job2.build());
Set<RollupJobCaps> caps = new HashSet<>(2);
caps.add(cap);
caps.add(cap2);
String[] normalIndices = new String[]{ESTestCase.randomAlphaOfLength(10)};
String[] rollupIndices = new String[]{ESTestCase.randomAlphaOfLength(10)};
String[] combinedIndices = new String[]{normalIndices[0], rollupIndices[0]};
TransportRollupSearchAction.RollupSearchContext ctx
= new TransportRollupSearchAction.RollupSearchContext(normalIndices, rollupIndices, caps);
SearchSourceBuilder source = new SearchSourceBuilder();
source.query(getQueryBuilder(1));
source.size(0);
source.aggregation(new DateHistogramAggregationBuilder("foo").field("foo")
.dateHistogramInterval(job.getGroupConfig().getDateHisto().getInterval()));
SearchRequest request = new SearchRequest(combinedIndices, source);
MultiSearchRequest msearch = TransportRollupSearchAction.createMSearchRequest(request, namedWriteableRegistry, ctx);
assertThat(msearch.requests().size(), equalTo(2));
assertThat(msearch.requests().get(0), equalTo(new SearchRequest(normalIndices, request.source())));
SearchRequest normal = msearch.requests().get(0);
assertThat(normal.indices().length, equalTo(1));
assertThat(normal.indices()[0], equalTo(normalIndices[0]));
SearchRequest rollup = msearch.requests().get(1);
assertThat(rollup.indices().length, equalTo(1));
assertThat(rollup.indices()[0], equalTo(rollupIndices[0]));
assert(rollup.source().aggregations().getAggregatorFactories().get(0) instanceof FilterAggregationBuilder);
// The executed query should match the first job ("foo") because the second job contained a histo and the first didn't,
// so the first job will be "better"
BoolQueryBuilder bool1 = new BoolQueryBuilder()
.must(TransportRollupSearchAction.rewriteQuery(request.source().query(), caps))
.filter(new TermQueryBuilder(RollupField.formatMetaField(RollupField.ID.getPreferredName()), "foo"))
.filter(new TermQueryBuilder(RollupField.formatMetaField(RollupField.VERSION_FIELD), Rollup.ROLLUP_VERSION));
assertThat(msearch.requests().get(1).source().query(), equalTo(bool1));
}
public void testNoIndicesToSeparate() {
String[] indices = new String[]{};
ImmutableOpenMap<String, IndexMetaData> meta = ImmutableOpenMap.<String, IndexMetaData>builder().build();
@ -383,11 +538,11 @@ public class SearchActionTests extends ESTestCase {
public void testEmptyMetadata() {
String[] indices = new String[]{"foo", "bar"};
ImmutableOpenMap<String, IndexMetaData> meta = ImmutableOpenMap.<String, IndexMetaData>builder().build();
TransportRollupSearchAction.Triple<String[], String[], List<RollupJobCaps>> result
TransportRollupSearchAction.RollupSearchContext result
= TransportRollupSearchAction.separateIndices(indices, meta);
assertThat(result.v1().length, equalTo(2));
assertThat(result.v2().length, equalTo(0));
assertThat(result.v3().size(), equalTo(0));
assertThat(result.getLiveIndices().length, equalTo(2));
assertThat(result.getRollupIndices().length, equalTo(0));
assertThat(result.getJobCaps().size(), equalTo(0));
}
public void testNoMatchingIndexInMetadata() {
@ -395,11 +550,11 @@ public class SearchActionTests extends ESTestCase {
IndexMetaData indexMetaData = mock(IndexMetaData.class);
ImmutableOpenMap.Builder<String, IndexMetaData> meta = ImmutableOpenMap.builder(1);
meta.put("bar", indexMetaData);
TransportRollupSearchAction.Triple<String[], String[], List<RollupJobCaps>> result
TransportRollupSearchAction.RollupSearchContext result
= TransportRollupSearchAction.separateIndices(indices, meta.build());
assertThat(result.v1().length, equalTo(1));
assertThat(result.v2().length, equalTo(0));
assertThat(result.v3().size(), equalTo(0));
assertThat(result.getLiveIndices().length, equalTo(1));
assertThat(result.getRollupIndices().length, equalTo(0));
assertThat(result.getJobCaps().size(), equalTo(0));
}
public void testMatchingIndexInMetadata() throws IOException {
@ -421,20 +576,20 @@ public class SearchActionTests extends ESTestCase {
ImmutableOpenMap.Builder<String, IndexMetaData> metaMap = ImmutableOpenMap.builder(1);
metaMap.put("foo", meta);
TransportRollupSearchAction.Triple<String[], String[], List<RollupJobCaps>> result
TransportRollupSearchAction.RollupSearchContext result
= TransportRollupSearchAction.separateIndices(indices, metaMap.build());
assertThat(result.v1().length, equalTo(0));
assertThat(result.v2().length, equalTo(1));
assertThat(result.v2()[0], equalTo("foo"));
assertThat(result.v3().size(), equalTo(1));
assertThat(result.getLiveIndices().length, equalTo(0));
assertThat(result.getRollupIndices().length, equalTo(1));
assertThat(result.getRollupIndices()[0], equalTo("foo"));
assertThat(result.getJobCaps().size(), equalTo(1));
}
public void testLiveOnly() {
public void testLiveOnlyProcess() {
String[] indices = new String[]{"foo"};
IndexMetaData indexMetaData = mock(IndexMetaData.class);
ImmutableOpenMap.Builder<String, IndexMetaData> meta = ImmutableOpenMap.builder(1);
meta.put("bar", indexMetaData);
TransportRollupSearchAction.Triple<String[], String[], List<RollupJobCaps>> result
TransportRollupSearchAction.RollupSearchContext result
= TransportRollupSearchAction.separateIndices(indices, meta.build());
SearchResponse response = mock(SearchResponse.class);
@ -465,7 +620,7 @@ public class SearchActionTests extends ESTestCase {
ImmutableOpenMap.Builder<String, IndexMetaData> metaMap = ImmutableOpenMap.builder(1);
metaMap.put("foo", indexMeta);
TransportRollupSearchAction.Triple<String[], String[], List<RollupJobCaps>> result
TransportRollupSearchAction.RollupSearchContext result
= TransportRollupSearchAction.separateIndices(indices, metaMap.build());
SearchResponse response = mock(SearchResponse.class);
@ -510,6 +665,41 @@ public class SearchActionTests extends ESTestCase {
assertThat(avg.getValue(), IsEqual.equalTo(5.0));
}
public void testTooManyRollups() throws IOException {
String[] indices = new String[]{"foo", "bar"};
String jobName = randomAlphaOfLength(5);
RollupJobConfig job = ConfigTestHelpers.getRollupJob(jobName).build();
MappingMetaData mappingMeta = new MappingMetaData(RollupField.NAME,
Collections.singletonMap(RollupField.NAME,
Collections.singletonMap("_meta",
Collections.singletonMap(RollupField.ROLLUP_META,
Collections.singletonMap(jobName, job)))));
ImmutableOpenMap.Builder<String, MappingMetaData> mappings = ImmutableOpenMap.builder(1);
mappings.put(RollupField.NAME, mappingMeta);
IndexMetaData indexMeta = Mockito.mock(IndexMetaData.class);
when(indexMeta.getMappings()).thenReturn(mappings.build());
ImmutableOpenMap.Builder<String, IndexMetaData> metaMap = ImmutableOpenMap.builder(2);
metaMap.put("foo", indexMeta);
metaMap.put("bar", indexMeta);
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
() -> TransportRollupSearchAction.separateIndices(indices, metaMap.build()));
assertThat(e.getMessage(), equalTo("RollupSearch currently only supports searching one rollup index at a time."));
}
public void testEmptyMsearch() {
TransportRollupSearchAction.RollupSearchContext result
= new TransportRollupSearchAction.RollupSearchContext(new String[0], new String[0], Collections.emptySet());
MultiSearchResponse msearchResponse = new MultiSearchResponse(new MultiSearchResponse.Item[0], 1);
RuntimeException e = expectThrows(RuntimeException.class, () -> TransportRollupSearchAction.processResponses(result,
msearchResponse, mock(InternalAggregation.ReduceContext.class)));
assertThat(e.getMessage(), equalTo("MSearch response was empty, cannot unroll RollupSearch results"));
}
public void testBoth() throws IOException {
String[] indices = new String[]{"foo", "bar"};
@ -537,7 +727,7 @@ public class SearchActionTests extends ESTestCase {
ImmutableOpenMap.Builder<String, IndexMetaData> metaMap = ImmutableOpenMap.builder(2);
metaMap.put("foo", indexMeta);
metaMap.put("bar", liveIndexMeta);
TransportRollupSearchAction.Triple<String[], String[], List<RollupJobCaps>> separateIndices
TransportRollupSearchAction.RollupSearchContext separateIndices
= TransportRollupSearchAction.separateIndices(indices, metaMap.build());
@ -599,4 +789,10 @@ public class SearchActionTests extends ESTestCase {
assertThat(avg.getValue(), IsEqual.equalTo(5.0));
}
private Set<RollupJobCaps> singletonSet(RollupJobCaps cap) {
Set<RollupJobCaps> caps = new HashSet<>();
caps.add(cap);
return caps;
}
}
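
Several of the tests above construct a `TransportRollupSearchAction.RollupSearchContext` in place of the old `Triple<String[], String[], List<RollupJobCaps>>` returned by `separateIndices`. As a rough sketch only, inferred from how the tests use it (a constructor taking live indices, rollup indices, and a `Set<RollupJobCaps>`, plus `getLiveIndices()`, `getRollupIndices()`, and `getJobCaps()` accessors), the holder could look something like the following; the actual inner class in `TransportRollupSearchAction` may differ:

[source,java]
----
// Hypothetical sketch inferred from test usage; not the actual
// implementation inside TransportRollupSearchAction.
import java.util.Set;
import org.elasticsearch.xpack.core.rollup.action.RollupJobCaps;

class RollupSearchContext {
    private final String[] liveIndices;
    private final String[] rollupIndices;
    private final Set<RollupJobCaps> jobCaps;

    RollupSearchContext(String[] liveIndices, String[] rollupIndices, Set<RollupJobCaps> jobCaps) {
        this.liveIndices = liveIndices;
        this.rollupIndices = rollupIndices;
        this.jobCaps = jobCaps;
    }

    // Indices holding raw (non-rolled) data
    String[] getLiveIndices() { return liveIndices; }

    // Indices holding rollup summary documents
    String[] getRollupIndices() { return rollupIndices; }

    // Capabilities of the rollup jobs found in the rollup indices
    Set<RollupJobCaps> getJobCaps() { return jobCaps; }
}
----

Keeping the three values behind named accessors (rather than the positional `v1()`/`v2()`/`v3()` of the old Triple) is what lets assertions such as `result.getLiveIndices().length` and `result.getJobCaps().size()` read unambiguously in the tests above.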

View File

@ -155,6 +155,14 @@ public class ConfigTests extends ESTestCase {
assertThat(e.getMessage(), equalTo("Parameter [field] is mandatory."));
}
public void testEmptyDateHistoInterval() {
DateHistoGroupConfig.Builder config = ConfigTestHelpers.getDateHisto();
config.setField("foo");
config.setInterval(null);
Exception e = expectThrows(IllegalArgumentException.class, config::build);
assertThat(e.getMessage(), equalTo("Parameter [interval] is mandatory."));
}
public void testNullTimeZone() {
DateHistoGroupConfig.Builder config = ConfigTestHelpers.getDateHisto();
config.setTimeZone(null);

View File

@ -381,7 +381,7 @@ public class IndexerUtilsTests extends AggregatorTestCase {
List<IndexRequest> docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), groupConfig.build(), "foo");
assertThat(docs.size(), equalTo(1));
assertThat(docs.get(0).id(), equalTo("2233159223"));
assertThat(docs.get(0).id(), equalTo("1237859798"));
}
interface Mock {

View File

@ -0,0 +1,26 @@
{
"xpack.rollup.rollup_search": {
"documentation": "",
"methods": [ "GET", "POST" ],
"url": {
"path": "/{index}/_rollup_search",
"paths": [ "{index}/_rollup_search", "{index}/{type}/_rollup_search" ],
"parts": {
"index": {
"type": "string",
"required": true,
"description": "The index or index-pattern (containing rollup or regular data) that should be searched"
},
"type": {
"type": "string",
"required": false,
"description": "The doc type inside the index"
}
}
},
"body": {
"description" : "The search request body",
"required" : true
}
}
}

View File

@ -0,0 +1,658 @@
setup:
- do:
headers:
Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser
xpack.rollup.put_job:
id: foo
body: >
{
"index_pattern": "foo",
"rollup_index": "foo_rollup",
"cron": "*/30 * * * * ?",
"size" :10,
"groups" : {
"date_histogram": {
"field": "timestamp",
"interval": "1h"
},
"terms": {
"fields": ["partition"]
}
},
"metrics": [
{
"field": "price",
"metrics": ["max"]
}
]
}
- do:
headers:
Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser
bulk:
refresh: true
body:
- index:
_index: "foo_rollup"
_type: "rollup"
- timestamp.date_histogram.timestamp: "2017-01-01T05:00:00Z"
timestamp.date_histogram.interval: "1h"
timestamp.date_histogram.time_zone: "UTC"
timestamp.date_histogram._count: 1
partition.terms.value: "a"
partition.terms._count: 1
price.max.value: 1
"_rollup.id": "foo"
"_rollup.computed":
- "timestamp.date_histogram"
- "partition.terms"
"_rollup.version": 1
- index:
_index: "foo_rollup"
_type: "rollup"
- timestamp.date_histogram.timestamp: "2017-01-01T06:00:00Z"
timestamp.date_histogram.interval: "1h"
timestamp.date_histogram.time_zone: "UTC"
timestamp.date_histogram._count: 2
partition.terms.value: "b"
partition.terms._count: 2
price.max.value: 2
"_rollup.id": "foo"
"_rollup.computed":
- "timestamp.date_histogram"
- "partition.terms"
"_rollup.version": 1
- index:
_index: "foo_rollup"
_type: "rollup"
- timestamp.date_histogram.timestamp: "2017-01-01T07:00:00Z"
timestamp.date_histogram.interval: "1h"
timestamp.date_histogram.time_zone: "UTC"
timestamp.date_histogram._count: 10
partition.terms.value: "a"
partition.terms._count: 10
price.max.value: 3
"_rollup.id": "foo"
"_rollup.computed":
- "timestamp.date_histogram"
- "partition.terms"
"_rollup.version": 1
- index:
_index: "foo_rollup"
_type: "rollup"
- timestamp.date_histogram.timestamp: "2017-01-01T08:00:00Z"
timestamp.date_histogram.interval: "1h"
timestamp.date_histogram.time_zone: "UTC"
timestamp.date_histogram._count: 10
partition.terms.value: "b"
partition.terms._count: 10
price.max.value: 4
"_rollup.id": "foo"
"_rollup.computed":
- "timestamp.date_histogram"
- "partition.terms"
"_rollup.version": 1
- index:
_index: "foo_rollup"
_type: "rollup"
- timestamp.date_histogram.timestamp: "2017-01-01T08:00:00Z"
timestamp.date_histogram.interval: "1h"
timestamp.date_histogram.time_zone: "UTC"
timestamp.date_histogram._count: 10
partition.terms.value: "a"
partition.terms._count: 10
price.max.value: 3
"_rollup.id": "foo"
"_rollup.computed":
- "timestamp.date_histogram"
- "partition.terms"
"_rollup.version": 1
---
"Basic Search":
- do:
xpack.rollup.rollup_search:
index: "foo_rollup"
body:
size: 0
aggs:
histo:
date_histogram:
field: "timestamp"
interval: "1h"
time_zone: "UTC"
- length: { aggregations.histo.buckets: 4 }
- match: { aggregations.histo.buckets.0.key_as_string: "2017-01-01T05:00:00.000Z" }
- match: { aggregations.histo.buckets.0.doc_count: 1 }
- match: { aggregations.histo.buckets.1.key_as_string: "2017-01-01T06:00:00.000Z" }
- match: { aggregations.histo.buckets.1.doc_count: 2 }
- match: { aggregations.histo.buckets.2.key_as_string: "2017-01-01T07:00:00.000Z" }
- match: { aggregations.histo.buckets.2.doc_count: 10 }
- match: { aggregations.histo.buckets.3.key_as_string: "2017-01-01T08:00:00.000Z" }
- match: { aggregations.histo.buckets.3.doc_count: 20 }
---
"Search with Metric":
- do:
xpack.rollup.rollup_search:
index: "foo_rollup"
body:
size: 0
aggs:
histo:
date_histogram:
field: "timestamp"
interval: "1h"
time_zone: "UTC"
aggs:
the_max:
max:
field: "price"
- length: { aggregations.histo.buckets: 4 }
- match: { aggregations.histo.buckets.0.key_as_string: "2017-01-01T05:00:00.000Z" }
- match: { aggregations.histo.buckets.0.doc_count: 1 }
- match: { aggregations.histo.buckets.0.the_max.value: 1 }
- match: { aggregations.histo.buckets.1.key_as_string: "2017-01-01T06:00:00.000Z" }
- match: { aggregations.histo.buckets.1.doc_count: 2 }
- match: { aggregations.histo.buckets.1.the_max.value: 2 }
- match: { aggregations.histo.buckets.2.key_as_string: "2017-01-01T07:00:00.000Z" }
- match: { aggregations.histo.buckets.2.doc_count: 10 }
- match: { aggregations.histo.buckets.2.the_max.value: 3 }
- match: { aggregations.histo.buckets.3.key_as_string: "2017-01-01T08:00:00.000Z" }
- match: { aggregations.histo.buckets.3.doc_count: 20 }
- match: { aggregations.histo.buckets.3.the_max.value: 4 }
---
"Search with Query":
- do:
xpack.rollup.rollup_search:
index: "foo_rollup"
body:
size: 0
query:
term:
partition: a
aggs:
histo:
date_histogram:
field: "timestamp"
interval: "1h"
time_zone: "UTC"
aggs:
the_max:
max:
field: "price"
- length: { aggregations.histo.buckets: 4 }
- match: { aggregations.histo.buckets.0.key_as_string: "2017-01-01T05:00:00.000Z" }
- match: { aggregations.histo.buckets.0.doc_count: 1 }
- match: { aggregations.histo.buckets.0.the_max.value: 1 }
- match: { aggregations.histo.buckets.1.key_as_string: "2017-01-01T06:00:00.000Z" }
- match: { aggregations.histo.buckets.1.doc_count: 0 }
- match: { aggregations.histo.buckets.2.key_as_string: "2017-01-01T07:00:00.000Z" }
- match: { aggregations.histo.buckets.2.doc_count: 10 }
- match: { aggregations.histo.buckets.2.the_max.value: 3 }
- match: { aggregations.histo.buckets.3.key_as_string: "2017-01-01T08:00:00.000Z" }
- match: { aggregations.histo.buckets.3.doc_count: 10 }
- match: { aggregations.histo.buckets.3.the_max.value: 3 }
---
"Search with MatchAll and Second Job":
- do:
headers:
Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser
xpack.rollup.put_job:
id: foo2
body: >
{
"index_pattern": "foo",
"rollup_index": "foo_rollup",
"cron": "*/30 * * * * ?",
"size" :10,
"groups" : {
"date_histogram": {
"field": "timestamp",
"interval": "1h"
},
"terms": {
"fields": ["partition"]
}
},
"metrics": [
{
"field": "price",
"metrics": ["max"]
}
]
}
- do:
headers:
Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser
bulk:
refresh: true
body:
- index:
_index: "foo_rollup"
_type: "rollup"
- timestamp.date_histogram.timestamp: "2017-01-01T05:00:00Z"
timestamp.date_histogram.interval: "1h"
timestamp.date_histogram.time_zone: "UTC"
timestamp.date_histogram._count: 1
partition.terms.value: "a"
partition.terms._count: 1
price.max.value: 1
"_rollup.id": "foo2"
"_rollup.computed":
- "timestamp.date_histogram"
- "partition.terms"
"_rollup.version": 1
- index:
_index: "foo_rollup"
_type: "rollup"
- timestamp.date_histogram.timestamp: "2017-01-01T06:00:00Z"
timestamp.date_histogram.interval: "1h"
timestamp.date_histogram.time_zone: "UTC"
timestamp.date_histogram._count: 2
partition.terms.value: "b"
partition.terms._count: 2
price.max.value: 2
"_rollup.id": "foo2"
"_rollup.computed":
- "timestamp.date_histogram"
- "partition.terms"
"_rollup.version": 1
- index:
_index: "foo_rollup"
_type: "rollup"
- timestamp.date_histogram.timestamp: "2017-01-01T07:00:00Z"
timestamp.date_histogram.interval: "1h"
timestamp.date_histogram.time_zone: "UTC"
timestamp.date_histogram._count: 10
partition.terms.value: "a"
partition.terms._count: 10
price.max.value: 3
"_rollup.id": "foo2"
"_rollup.computed":
- "timestamp.date_histogram"
- "partition.terms"
"_rollup.version": 1
- index:
_index: "foo_rollup"
_type: "rollup"
- timestamp.date_histogram.timestamp: "2017-01-01T08:00:00Z"
timestamp.date_histogram.interval: "1h"
timestamp.date_histogram.time_zone: "UTC"
timestamp.date_histogram._count: 10
partition.terms.value: "b"
partition.terms._count: 10
price.max.value: 4
"_rollup.id": "foo2"
"_rollup.computed":
- "timestamp.date_histogram"
- "partition.terms"
"_rollup.version": 1
- index:
_index: "foo_rollup"
_type: "rollup"
- timestamp.date_histogram.timestamp: "2017-01-01T08:00:00Z"
timestamp.date_histogram.interval: "1h"
timestamp.date_histogram.time_zone: "UTC"
timestamp.date_histogram._count: 10
partition.terms.value: "a"
partition.terms._count: 10
price.max.value: 3
"_rollup.id": "foo2"
"_rollup.computed":
- "timestamp.date_histogram"
- "partition.terms"
"_rollup.version": 1
- do:
xpack.rollup.rollup_search:
index: "foo_rollup"
body:
size: 0
aggs:
histo:
date_histogram:
field: "timestamp"
interval: "1h"
time_zone: "UTC"
aggs:
the_max:
max:
field: "price"
- length: { aggregations.histo.buckets: 4 }
- match: { aggregations.histo.buckets.0.key_as_string: "2017-01-01T05:00:00.000Z" }
- match: { aggregations.histo.buckets.0.doc_count: 1 }
- match: { aggregations.histo.buckets.0.the_max.value: 1 }
- match: { aggregations.histo.buckets.1.key_as_string: "2017-01-01T06:00:00.000Z" }
- match: { aggregations.histo.buckets.1.doc_count: 2 }
- match: { aggregations.histo.buckets.1.the_max.value: 2 }
- match: { aggregations.histo.buckets.2.key_as_string: "2017-01-01T07:00:00.000Z" }
- match: { aggregations.histo.buckets.2.doc_count: 10 }
- match: { aggregations.histo.buckets.2.the_max.value: 3 }
- match: { aggregations.histo.buckets.3.key_as_string: "2017-01-01T08:00:00.000Z" }
- match: { aggregations.histo.buckets.3.doc_count: 20 }
- match: { aggregations.histo.buckets.3.the_max.value: 4 }
---
"Search with Query and Second Job":
- do:
headers:
Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser
xpack.rollup.put_job:
id: foo2
body: >
{
"index_pattern": "foo",
"rollup_index": "foo_rollup",
"cron": "*/30 * * * * ?",
"size" :10,
"groups" : {
"date_histogram": {
"field": "timestamp",
"interval": "1h"
},
"terms": {
"fields": ["partition"]
}
},
"metrics": [
{
"field": "price",
"metrics": ["max"]
}
]
}
- do:
headers:
Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser
bulk:
refresh: true
body:
- index:
_index: "foo_rollup"
_type: "rollup"
- timestamp.date_histogram.timestamp: "2017-01-01T05:00:00Z"
timestamp.date_histogram.interval: "1h"
timestamp.date_histogram.time_zone: "UTC"
timestamp.date_histogram._count: 1
partition.terms.value: "a"
partition.terms._count: 1
price.max.value: 1
"_rollup.id": "foo2"
"_rollup.computed":
- "timestamp.date_histogram"
- "partition.terms"
"_rollup.version": 1
- index:
_index: "foo_rollup"
_type: "rollup"
- timestamp.date_histogram.timestamp: "2017-01-01T06:00:00Z"
timestamp.date_histogram.interval: "1h"
timestamp.date_histogram.time_zone: "UTC"
timestamp.date_histogram._count: 2
partition.terms.value: "b"
partition.terms._count: 2
price.max.value: 2
"_rollup.id": "foo2"
"_rollup.computed":
- "timestamp.date_histogram"
- "partition.terms"
"_rollup.version": 1
- index:
_index: "foo_rollup"
_type: "rollup"
- timestamp.date_histogram.timestamp: "2017-01-01T07:00:00Z"
timestamp.date_histogram.interval: "1h"
timestamp.date_histogram.time_zone: "UTC"
timestamp.date_histogram._count: 10
partition.terms.value: "a"
partition.terms._count: 10
price.max.value: 3
"_rollup.id": "foo2"
"_rollup.computed":
- "timestamp.date_histogram"
- "partition.terms"
"_rollup.version": 1
- index:
_index: "foo_rollup"
_type: "rollup"
- timestamp.date_histogram.timestamp: "2017-01-01T08:00:00Z"
timestamp.date_histogram.interval: "1h"
timestamp.date_histogram.time_zone: "UTC"
timestamp.date_histogram._count: 10
partition.terms.value: "b"
partition.terms._count: 10
price.max.value: 4
"_rollup.id": "foo2"
"_rollup.computed":
- "timestamp.date_histogram"
- "partition.terms"
"_rollup.version": 1
- index:
_index: "foo_rollup"
_type: "rollup"
- timestamp.date_histogram.timestamp: "2017-01-01T08:00:00Z"
timestamp.date_histogram.interval: "1h"
timestamp.date_histogram.time_zone: "UTC"
timestamp.date_histogram._count: 10
partition.terms.value: "a"
partition.terms._count: 10
price.max.value: 3
"_rollup.id": "foo2"
"_rollup.computed":
- "timestamp.date_histogram"
- "partition.terms"
"_rollup.version": 1
- do:
xpack.rollup.rollup_search:
index: "foo_rollup"
body:
size: 0
query:
term:
partition: a
aggs:
histo:
date_histogram:
field: "timestamp"
interval: "1h"
time_zone: "UTC"
aggs:
the_max:
max:
field: "price"
- length: { aggregations.histo.buckets: 4 }
- match: { aggregations.histo.buckets.0.key_as_string: "2017-01-01T05:00:00.000Z" }
- match: { aggregations.histo.buckets.0.doc_count: 1 }
- match: { aggregations.histo.buckets.0.the_max.value: 1 }
- match: { aggregations.histo.buckets.1.key_as_string: "2017-01-01T06:00:00.000Z" }
- match: { aggregations.histo.buckets.1.doc_count: 0 }
- match: { aggregations.histo.buckets.2.key_as_string: "2017-01-01T07:00:00.000Z" }
- match: { aggregations.histo.buckets.2.doc_count: 10 }
- match: { aggregations.histo.buckets.2.the_max.value: 3 }
- match: { aggregations.histo.buckets.3.key_as_string: "2017-01-01T08:00:00.000Z" }
- match: { aggregations.histo.buckets.3.doc_count: 10 }
- match: { aggregations.histo.buckets.3.the_max.value: 3 }
---
"Search with Query and Second Job different intervals":
- do:
headers:
Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser
xpack.rollup.put_job:
id: foo2
body: >
{
"index_pattern": "foo",
"rollup_index": "foo_rollup",
"cron": "*/30 * * * * ?",
"size" :10,
"groups" : {
"date_histogram": {
"field": "timestamp",
"interval": "1d"
},
"terms": {
"fields": ["partition"]
}
},
"metrics": [
{
"field": "price",
"metrics": ["max"]
}
]
}
- do:
headers:
Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser
bulk:
refresh: true
body:
- index:
_index: "foo_rollup"
_type: "rollup"
- timestamp.date_histogram.timestamp: "2017-01-01T05:00:00Z"
timestamp.date_histogram.interval: "1h"
timestamp.date_histogram.time_zone: "UTC"
timestamp.date_histogram._count: 1
partition.terms.value: "a"
partition.terms._count: 1
price.max.value: 1
"_rollup.id": "foo2"
"_rollup.computed":
- "timestamp.date_histogram"
- "partition.terms"
"_rollup.version": 1
- index:
_index: "foo_rollup"
_type: "rollup"
- timestamp.date_histogram.timestamp: "2017-01-01T06:00:00Z"
timestamp.date_histogram.interval: "1h"
timestamp.date_histogram.time_zone: "UTC"
timestamp.date_histogram._count: 2
partition.terms.value: "b"
partition.terms._count: 2
price.max.value: 2
"_rollup.id": "foo2"
"_rollup.computed":
- "timestamp.date_histogram"
- "partition.terms"
"_rollup.version": 1
- index:
_index: "foo_rollup"
_type: "rollup"
- timestamp.date_histogram.timestamp: "2017-01-01T07:00:00Z"
timestamp.date_histogram.interval: "1h"
timestamp.date_histogram.time_zone: "UTC"
timestamp.date_histogram._count: 10
partition.terms.value: "a"
partition.terms._count: 10
price.max.value: 3
"_rollup.id": "foo2"
"_rollup.computed":
- "timestamp.date_histogram"
- "partition.terms"
"_rollup.version": 1
- index:
_index: "foo_rollup"
_type: "rollup"
- timestamp.date_histogram.timestamp: "2017-01-01T08:00:00Z"
timestamp.date_histogram.interval: "1h"
timestamp.date_histogram.time_zone: "UTC"
timestamp.date_histogram._count: 10
partition.terms.value: "b"
partition.terms._count: 10
price.max.value: 4
"_rollup.id": "foo2"
"_rollup.computed":
- "timestamp.date_histogram"
- "partition.terms"
"_rollup.version": 1
- index:
_index: "foo_rollup"
_type: "rollup"
- timestamp.date_histogram.timestamp: "2017-01-01T08:00:00Z"
timestamp.date_histogram.interval: "1h"
timestamp.date_histogram.time_zone: "UTC"
timestamp.date_histogram._count: 10
partition.terms.value: "a"
partition.terms._count: 10
price.max.value: 3
"_rollup.id": "foo2"
"_rollup.computed":
- "timestamp.date_histogram"
- "partition.terms"
"_rollup.version": 1
- do:
xpack.rollup.rollup_search:
index: "foo_rollup"
body:
size: 0
query:
term:
partition: a
aggs:
histo:
date_histogram:
field: "timestamp"
interval: "1h"
time_zone: "UTC"
aggs:
the_max:
max:
field: "price"
- length: { aggregations.histo.buckets: 4 }
- match: { aggregations.histo.buckets.0.key_as_string: "2017-01-01T05:00:00.000Z" }
- match: { aggregations.histo.buckets.0.doc_count: 1 }
- match: { aggregations.histo.buckets.0.the_max.value: 1 }
- match: { aggregations.histo.buckets.1.key_as_string: "2017-01-01T06:00:00.000Z" }
- match: { aggregations.histo.buckets.1.doc_count: 0 }
- match: { aggregations.histo.buckets.2.key_as_string: "2017-01-01T07:00:00.000Z" }
- match: { aggregations.histo.buckets.2.doc_count: 10 }
- match: { aggregations.histo.buckets.2.the_max.value: 3 }
- match: { aggregations.histo.buckets.3.key_as_string: "2017-01-01T08:00:00.000Z" }
- match: { aggregations.histo.buckets.3.doc_count: 10 }
- match: { aggregations.histo.buckets.3.the_max.value: 3 }