Aggregations: do not take deleted documents into account in aggregation filters.

Since aggregators are only called on documents that match the query, they are never called on deleted documents; by passing `null` as the live docs, we very likely remove a BitsFilteredDocIdSet layer.

Close #8540
This commit is contained in:
parent cca5934e9d
commit f30a0e846d
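For context on the wrapper the message refers to: Lucene filter implementations conventionally finish getDocIdSet by calling BitsFilteredDocIdSet.wrap(set, acceptDocs), which returns the set untouched when acceptDocs is null and otherwise layers a per-document liveness check over every iteration. A minimal, filter-shaped sketch of that pattern, assuming Lucene core on the classpath; ExampleFilter and its precomputed set are hypothetical, not code from this commit:

    import java.io.IOException;

    import org.apache.lucene.index.LeafReaderContext;
    import org.apache.lucene.search.BitsFilteredDocIdSet;
    import org.apache.lucene.search.DocIdSet;
    import org.apache.lucene.util.Bits;

    // Hypothetical class mirroring Filter.getDocIdSet(LeafReaderContext, Bits)
    // to show where the wrapper appears in a typical filter implementation.
    class ExampleFilter {
        private final DocIdSet matching; // assume computed per segment elsewhere

        ExampleFilter(DocIdSet matching) {
            this.matching = matching;
        }

        public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
            // acceptDocs == null: wrap() returns `matching` as-is.
            // acceptDocs != null: wrap() adds an acceptDocs.get(doc) check
            // to every document the returned set iterates over.
            return BitsFilteredDocIdSet.wrap(matching, acceptDocs);
        }
    }

Passing null below therefore skips that extra layer entirely, which is safe because the aggregation framework only ever feeds live documents to the aggregator.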
@@ -51,7 +51,7 @@ public class FilterAggregator extends SingleBucketAggregator {
     @Override
     public void setNextReader(LeafReaderContext reader) {
         try {
-            bits = DocIdSets.toSafeBits(reader.reader(), filter.getDocIdSet(reader, reader.reader().getLiveDocs()));
+            bits = DocIdSets.toSafeBits(reader.reader(), filter.getDocIdSet(reader, null));
         } catch (IOException ioe) {
             throw new AggregationExecutionException("Failed to aggregate filter aggregator [" + name + "]", ioe);
         }
@@ -70,7 +70,7 @@ public class FiltersAggregator extends BucketsAggregator {
     public void setNextReader(LeafReaderContext reader) {
         try {
             for (int i = 0; i < filters.length; i++) {
-                bits[i] = DocIdSets.toSafeBits(reader.reader(), filters[i].filter.getDocIdSet(reader, reader.reader().getLiveDocs()));
+                bits[i] = DocIdSets.toSafeBits(reader.reader(), filters[i].filter.getDocIdSet(reader, null));
             }
         } catch (IOException ioe) {
             throw new AggregationExecutionException("Failed to aggregate filter aggregator [" + name + "]", ioe);
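To see why liveness never needs to be encoded in these bits, consider how they are consumed downstream. A simplified, hypothetical stand-in for the aggregator's collect path (not the literal Elasticsearch code): documents only reach collect() after matching the main query, which already excludes deletions, so the bits answer filter membership alone.

    import org.apache.lucene.util.Bits;

    // Hypothetical stand-in for a single-bucket filter aggregator's collect path.
    class FilterBucketCollector {
        private final Bits bits;   // per-segment filter bits from setNextReader
        private long docCount = 0; // stand-in for real bucket bookkeeping

        FilterBucketCollector(Bits bits) {
            this.bits = bits;
        }

        void collect(int doc) {
            if (bits.get(doc)) {   // membership check only; no liveDocs test needed
                docCount++;
            }
        }
    }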
@@ -64,12 +64,17 @@ public class FilterTests extends ElasticsearchIntegrationTest {
                     .endObject()));
         }
         for (int i = numTag1Docs; i < numDocs; i++) {
-            builders.add(client().prepareIndex("idx", "type", ""+i).setSource(jsonBuilder()
+            IndexRequestBuilder req = client().prepareIndex("idx", "type", ""+i).setSource(jsonBuilder()
                     .startObject()
                     .field("value", i)
                     .field("tag", "tag2")
                     .field("name", "name" + i)
-                    .endObject()));
+                    .endObject());
+            builders.add(req);
+            if (randomBoolean()) {
+                // randomly index the document twice so that we have deleted docs that match the filter
+                builders.add(req);
+            }
         }
         prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer").execute().actionGet();
         for (int i = 0; i < 2; i++) {
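The test change above (and the matching one below) relies on a Lucene detail: writing a document under an existing id is an update, which Lucene implements as delete + add, leaving the old copy behind as a deleted doc until a merge reclaims it. A minimal standalone sketch of that behavior, assuming Lucene core on the classpath; the class and field names are made up for illustration:

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.StringField;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.store.RAMDirectory;

    // Indexing the same id twice yields a deleted doc that low-level
    // filters can still see until a merge reclaims it.
    public class DeletedDocsDemo {
        public static void main(String[] args) throws Exception {
            RAMDirectory dir = new RAMDirectory();
            IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));

            Document doc = new Document();
            doc.add(new StringField("id", "1", Field.Store.NO));
            doc.add(new StringField("tag", "tag2", Field.Store.NO));
            writer.addDocument(doc);

            // Second write with the same id: delete + add under the hood.
            writer.updateDocument(new Term("id", "1"), doc);
            writer.commit();

            DirectoryReader reader = DirectoryReader.open(dir);
            // Typically prints "live=1 deleted=1" until segments merge.
            System.out.println("live=" + reader.numDocs() + " deleted=" + reader.numDeletedDocs());
            reader.close();
            writer.close();
        }
    }

Randomly duplicating the index request therefore plants deleted documents that still match the aggregation filter, which is exactly what the fixed code must ignore.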
@@ -60,20 +60,29 @@ public class FiltersTests extends ElasticsearchIntegrationTest {
         numTag1Docs = randomIntBetween(1, numDocs - 1);
         List<IndexRequestBuilder> builders = new ArrayList<>();
         for (int i = 0; i < numTag1Docs; i++) {
-            builders.add(client().prepareIndex("idx", "type", ""+i).setSource(jsonBuilder()
+            IndexRequestBuilder req = client().prepareIndex("idx", "type", ""+i).setSource(jsonBuilder()
                     .startObject()
                     .field("value", i + 1)
                     .field("tag", "tag1")
-                    .endObject()));
+                    .endObject());
+            builders.add(req);
+            if (randomBoolean()) {
+                // randomly index the document twice so that we have deleted docs that match the filter
+                builders.add(req);
+            }
         }
         for (int i = numTag1Docs; i < numDocs; i++) {
             numTag2Docs++;
-            builders.add(client().prepareIndex("idx", "type", ""+i).setSource(jsonBuilder()
+            IndexRequestBuilder req = client().prepareIndex("idx", "type", ""+i).setSource(jsonBuilder()
                     .startObject()
                     .field("value", i)
                     .field("tag", "tag2")
                     .field("name", "name" + i)
-                    .endObject()));
+                    .endObject());
+            builders.add(req);
+            if (randomBoolean()) {
+                builders.add(req);
+            }
         }
         prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer").execute().actionGet();
         for (int i = 0; i < 2; i++) {