Merge branch 'master' into index-lifecycle

Gordon Brown 2018-08-23 14:56:25 -06:00
commit 935b28087b
10 changed files with 149 additions and 32 deletions

View File

@@ -30,6 +30,10 @@ in similar way to the <<query-dsl-multi-match-query,multi match query>>
[WARNING]
Note that the usage of `/_termvector` is deprecated in 2.0, and replaced by `/_termvectors`.
[WARNING]
The Term Vectors API does not work on nested fields. Requesting `/_termvectors` on a nested
field or on any sub-field of a nested field returns empty results.
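For illustration, such a request might look like the sketch below (the `my-index` index and the nested `comments` field are hypothetical, not part of the examples above); the `term_vectors` section of the response comes back empty:

[source,js]
--------------------------------------------------
GET /my-index/_doc/1/_termvectors?fields=comments.text
--------------------------------------------------
// NOTCONSOLE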
[float]
=== Return values

View File

@@ -0,0 +1,49 @@
setup:
- do:
indices.create:
index: testidx
body:
mappings:
_doc:
properties:
nested1:
type : nested
properties:
nested1-text:
type: text
object1:
properties:
object1-text:
type: text
object1-nested1:
type: nested
properties:
object1-nested1-text:
type: text
- do:
index:
index: testidx
type: _doc
id: 1
body:
"nested1" : [{ "nested1-text": "text1" }]
"object1" : [{ "object1-text": "text2" }, "object1-nested1" : [{"object1-nested1-text" : "text3"}]]
- do:
indices.refresh: {}
---
"Termvectors on nested fields should return empty results":
- do:
termvectors:
index: testidx
type: _doc
id: 1
fields: ["nested1", "nested1.nested1-text", "object1.object1-nested1", "object1.object1-nested1.object1-nested1-text", "object1.object1-text"]
- is_false: term_vectors.nested1
  - is_false: term_vectors.nested1\.nested1-text # escaping as the field name contains a dot
- is_false: term_vectors.object1\.object1-nested1
- is_false: term_vectors.object1\.object1-nested1\.object1-nested1-text
- is_true: term_vectors.object1\.object1-text

View File

@@ -122,6 +122,8 @@ public class Version implements Comparable<Version>, ToXContentFragment {
public static final Version V_5_6_10 = new Version(V_5_6_10_ID, org.apache.lucene.util.Version.LUCENE_6_6_1);
public static final int V_5_6_11_ID = 5061199;
public static final Version V_5_6_11 = new Version(V_5_6_11_ID, org.apache.lucene.util.Version.LUCENE_6_6_1);
public static final int V_5_6_12_ID = 5061299;
public static final Version V_5_6_12 = new Version(V_5_6_12_ID, org.apache.lucene.util.Version.LUCENE_6_6_1);
public static final int V_6_0_0_alpha1_ID = 6000001;
public static final Version V_6_0_0_alpha1 =
new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0);
@@ -174,10 +176,10 @@ public class Version implements Comparable<Version>, ToXContentFragment {
public static final Version V_6_3_1 = new Version(V_6_3_1_ID, org.apache.lucene.util.Version.LUCENE_7_3_1);
public static final int V_6_3_2_ID = 6030299;
public static final Version V_6_3_2 = new Version(V_6_3_2_ID, org.apache.lucene.util.Version.LUCENE_7_3_1);
public static final int V_6_3_3_ID = 6030399;
public static final Version V_6_3_3 = new Version(V_6_3_3_ID, org.apache.lucene.util.Version.LUCENE_7_3_1);
public static final int V_6_4_0_ID = 6040099;
public static final Version V_6_4_0 = new Version(V_6_4_0_ID, org.apache.lucene.util.Version.LUCENE_7_4_0);
public static final int V_6_4_1_ID = 6040199;
public static final Version V_6_4_1 = new Version(V_6_4_1_ID, org.apache.lucene.util.Version.LUCENE_7_4_0);
public static final int V_6_5_0_ID = 6050099;
public static final Version V_6_5_0 = new Version(V_6_5_0_ID, org.apache.lucene.util.Version.LUCENE_7_5_0);
public static final int V_7_0_0_alpha1_ID = 7000001;
@@ -200,10 +202,10 @@ public class Version implements Comparable<Version>, ToXContentFragment {
return V_7_0_0_alpha1;
case V_6_5_0_ID:
return V_6_5_0;
case V_6_4_1_ID:
return V_6_4_1;
case V_6_4_0_ID:
return V_6_4_0;
case V_6_3_3_ID:
return V_6_3_3;
case V_6_3_2_ID:
return V_6_3_2;
case V_6_3_1_ID:
@@ -246,6 +248,8 @@ public class Version implements Comparable<Version>, ToXContentFragment {
return V_6_0_0_alpha2;
case V_6_0_0_alpha1_ID:
return V_6_0_0_alpha1;
case V_5_6_12_ID:
return V_5_6_12;
case V_5_6_11_ID:
return V_5_6_11;
case V_5_6_10_ID:

View File

@@ -45,6 +45,7 @@ import org.elasticsearch.index.mapper.DocumentMapperForType;
import org.elasticsearch.index.mapper.KeywordFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ObjectMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.SourceFieldMapper;
@@ -160,7 +161,7 @@ public class TermVectorsService {
request.selectedFields(fieldNames.toArray(Strings.EMPTY_ARRAY));
}
private static boolean isValidField(MappedFieldType fieldType) {
private static boolean isValidField(MappedFieldType fieldType, IndexShard indexShard) {
// must be a string
if (fieldType instanceof StringFieldType == false) {
return false;
@@ -169,6 +170,16 @@ public class TermVectorsService {
if (fieldType.indexOptions() == IndexOptions.NONE) {
return false;
}
// and must not be under nested field
int dotIndex = fieldType.name().indexOf('.');
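        // walk each dot-delimited ancestor of the field name ("a", then "a.b", ...) and reject the field if any ancestor object is mapped as nested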
while (dotIndex > -1) {
String parentField = fieldType.name().substring(0, dotIndex);
ObjectMapper mapper = indexShard.mapperService().getObjectMapper(parentField);
if (mapper != null && mapper.nested().isNested()) {
return false;
}
dotIndex = fieldType.name().indexOf('.', dotIndex + 1);
}
return true;
}
@@ -177,7 +188,7 @@ public class TermVectorsService {
Set<String> validFields = new HashSet<>();
for (String field : selectedFields) {
MappedFieldType fieldType = indexShard.mapperService().fullName(field);
if (!isValidField(fieldType)) {
if (isValidField(fieldType, indexShard) == false) {
continue;
}
// already retrieved, only if the analyzer hasn't been overridden at the field
@@ -284,7 +295,7 @@ public class TermVectorsService {
Collection<DocumentField> documentFields = new HashSet<>();
for (IndexableField field : doc.getFields()) {
MappedFieldType fieldType = indexShard.mapperService().fullName(field.name());
if (!isValidField(fieldType)) {
if (isValidField(fieldType, indexShard) == false) {
continue;
}
if (request.selectedFields() != null && !request.selectedFields().contains(field.name())) {

View File

@@ -101,6 +101,7 @@ GET /sensor_rollup/_rollup_search
--------------------------------------------------
// CONSOLE
// TEST[setup:sensor_prefab_data]
// TEST[s/_rollup_search/_rollup_search?filter_path=took,timed_out,terminated_early,_shards,hits,aggregations/]
The query targets the `sensor_rollup` index, since it holds the rollup data as configured in the job. A `max`
aggregation has been used on the `temperature` field, yielding the following response:
@@ -194,6 +195,7 @@ GET sensor-1,sensor_rollup/_rollup_search <1>
--------------------------------------------------
// CONSOLE
// TEST[continued]
// TEST[s/_rollup_search/_rollup_search?filter_path=took,timed_out,terminated_early,_shards,hits,aggregations/]
<1> Note the URI now searches `sensor-1` and `sensor_rollup` at the same time
When the search is executed, the Rollup Search endpoint will do two things:

View File

@@ -238,11 +238,23 @@ public class RollupResponseTranslator {
? (InternalAggregations)liveResponse.getAggregations()
: InternalAggregations.EMPTY;
rolledResponses.forEach(r -> {
if (r == null || r.getAggregations() == null || r.getAggregations().asList().size() == 0) {
throw new RuntimeException("Expected to find aggregations in rollup response, but none found.");
int missingRollupAggs = rolledResponses.stream().mapToInt(searchResponse -> {
if (searchResponse == null
|| searchResponse.getAggregations() == null
|| searchResponse.getAggregations().asList().size() == 0) {
return 1;
}
});
return 0;
}).sum();
// We had no rollup aggs, so there is nothing to process
if (missingRollupAggs == rolledResponses.size()) {
// Return an empty response, but make sure we include all the shard, failure, etc stats
return mergeFinalResponse(liveResponse, rolledResponses, InternalAggregations.EMPTY);
} else if (missingRollupAggs > 0 && missingRollupAggs != rolledResponses.size()) {
// We were missing some but not all the aggs, unclear how to handle this. Bail.
throw new RuntimeException("Expected to find aggregations in rollup response, but none found.");
}
// The combination process returns a tree that is identical to the non-rolled
// which means we can use aggregation's reduce method to combine, just as if
@@ -275,27 +287,39 @@ public class RollupResponseTranslator {
new InternalAggregation.ReduceContext(reduceContext.bigArrays(), reduceContext.scriptService(), true));
}
// TODO allow profiling in the future
InternalSearchResponse combinedInternal = new InternalSearchResponse(SearchHits.empty(), currentTree, null, null,
rolledResponses.stream().anyMatch(SearchResponse::isTimedOut),
rolledResponses.stream().anyMatch(SearchResponse::isTimedOut),
rolledResponses.stream().mapToInt(SearchResponse::getNumReducePhases).sum());
return mergeFinalResponse(liveResponse, rolledResponses, currentTree);
}
private static SearchResponse mergeFinalResponse(SearchResponse liveResponse, List<SearchResponse> rolledResponses,
InternalAggregations aggs) {
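        // Aggregate shard counts, took time, timeout/termination flags and reduce phases across the rolled responses; the live response (if any) is merged in below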
int totalShards = rolledResponses.stream().mapToInt(SearchResponse::getTotalShards).sum();
        int successfulShards = rolledResponses.stream().mapToInt(SearchResponse::getSuccessfulShards).sum();
        int skippedShards = rolledResponses.stream().mapToInt(SearchResponse::getSkippedShards).sum();
        long took = rolledResponses.stream().mapToLong(r -> r.getTook().getMillis()).sum();
boolean isTimedOut = rolledResponses.stream().anyMatch(SearchResponse::isTimedOut);
boolean isTerminatedEarly = rolledResponses.stream()
.filter(r -> r.isTerminatedEarly() != null)
.anyMatch(SearchResponse::isTerminatedEarly);
int numReducePhases = rolledResponses.stream().mapToInt(SearchResponse::getNumReducePhases).sum();
if (liveResponse != null) {
totalShards += liveResponse.getTotalShards();
            successfulShards += liveResponse.getSuccessfulShards();
skippedShards += liveResponse.getSkippedShards();
took = Math.max(took, liveResponse.getTook().getMillis());
isTimedOut = isTimedOut && liveResponse.isTimedOut();
isTerminatedEarly = isTerminatedEarly && liveResponse.isTerminatedEarly();
numReducePhases += liveResponse.getNumReducePhases();
}
InternalSearchResponse combinedInternal = new InternalSearchResponse(SearchHits.empty(), aggs, null, null,
isTimedOut, isTerminatedEarly, numReducePhases);
// Shard failures are ignored atm, so returning an empty array is fine
        return new SearchResponse(combinedInternal, null, totalShards, successfulShards, skippedShards,
took, ShardSearchFailure.EMPTY_ARRAY, rolledResponses.get(0).getClusters());
took, ShardSearchFailure.EMPTY_ARRAY, rolledResponses.get(0).getClusters());
}
/**

View File

@@ -155,6 +155,18 @@ public class TransportRollupSearchAction extends TransportAction<SearchRequest,
rolledSearchSource.size(0);
AggregatorFactories.Builder sourceAgg = request.source().aggregations();
// If there are no aggs in the request, our translation won't create any msearch.
        // So just add a dummy request to the msearch and return. This is a bit silly
// but maintains how the regular search API behaves
if (sourceAgg == null || sourceAgg.count() == 0) {
// Note: we can't apply any query rewriting or filtering on the query because there
// are no validated caps, so we have no idea what job is intended here. The only thing
            // this affects is the doc count; since hits and aggs will both be empty, it doesn't really matter.
msearch.add(new SearchRequest(context.getRollupIndices(), request.source()).types(request.types()));
return msearch;
}
// Find our list of "best" job caps
Set<RollupJobCaps> validatedCaps = new HashSet<>();
sourceAgg.getAggregatorFactories()
@@ -248,11 +260,6 @@ public class TransportRollupSearchAction extends TransportAction<SearchRequest,
if (request.source().explain() != null && request.source().explain()) {
throw new IllegalArgumentException("Rollup search does not support explaining.");
}
// Rollup is only useful if aggregations are set, throw an exception otherwise
if (request.source().aggregations() == null) {
throw new IllegalArgumentException("Rollup requires at least one aggregation to be set.");
}
}
static QueryBuilder rewriteQuery(QueryBuilder builder, Set<RollupJobCaps> jobCaps) {

View File

@@ -198,10 +198,11 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
ScriptService scriptService = mock(ScriptService.class);
Exception e = expectThrows(RuntimeException.class,
() -> RollupResponseTranslator.combineResponses(msearch,
new InternalAggregation.ReduceContext(bigArrays, scriptService, true)));
assertThat(e.getMessage(), equalTo("Expected to find aggregations in rollup response, but none found."));
SearchResponse response = RollupResponseTranslator.combineResponses(msearch,
new InternalAggregation.ReduceContext(bigArrays, scriptService, true));
assertNotNull(response);
Aggregations responseAggs = response.getAggregations();
assertThat(responseAggs.asList().size(), equalTo(0));
}
public void testMissingRolledIndex() {

View File

@@ -307,21 +307,22 @@ public class SearchActionTests extends ESTestCase {
assertThat(e.getMessage(), equalTo("Rollup search does not support explaining."));
}
public void testNoAgg() {
String[] normalIndices = new String[]{randomAlphaOfLength(10)};
public void testNoRollupAgg() {
String[] normalIndices = new String[]{};
String[] rollupIndices = new String[]{randomAlphaOfLength(10)};
TransportRollupSearchAction.RollupSearchContext ctx
= new TransportRollupSearchAction.RollupSearchContext(normalIndices, rollupIndices, Collections.emptySet());
SearchSourceBuilder source = new SearchSourceBuilder();
source.query(new MatchAllQueryBuilder());
source.size(0);
SearchRequest request = new SearchRequest(normalIndices, source);
SearchRequest request = new SearchRequest(rollupIndices, source);
NamedWriteableRegistry registry = mock(NamedWriteableRegistry.class);
Exception e = expectThrows(IllegalArgumentException.class,
() -> TransportRollupSearchAction.createMSearchRequest(request, registry, ctx));
assertThat(e.getMessage(), equalTo("Rollup requires at least one aggregation to be set."));
MultiSearchRequest msearch = TransportRollupSearchAction.createMSearchRequest(request, registry, ctx);
assertThat(msearch.requests().size(), equalTo(1));
assertThat(msearch.requests().get(0), equalTo(request));
}
public void testNoLiveNoRollup() {
String[] normalIndices = new String[0];
String[] rollupIndices = new String[0];

View File

@@ -152,6 +152,20 @@ setup:
- match: { aggregations.histo.buckets.3.key_as_string: "2017-01-01T08:00:00.000Z" }
- match: { aggregations.histo.buckets.3.doc_count: 20 }
---
"Empty aggregation":
- do:
xpack.rollup.rollup_search:
index: "foo_rollup"
body:
size: 0
aggs: {}
- length: { hits.hits: 0 }
- match: { hits.total: 0 }
- is_false: aggregations
---
"Search with Metric":