Merge pull request #12518 from martijnvg/top_hits/bug/protected_against_crazy_size

Protected against `size` and `offset` larger than the total number of documents in a shard
This commit is contained in:
Martijn van Groningen 2015-07-29 10:33:27 +02:00
commit 5d7ed70fa0
2 changed files with 20 additions and 0 deletions

View File

@@ -117,6 +117,9 @@ public class TopHitsAggregator extends MetricsAggregator {
if (collectors == null) {
Sort sort = subSearchContext.sort();
int topN = subSearchContext.from() + subSearchContext.size();
// In the QueryPhase we don't need this protection, because it is built into the IndexSearcher,
// but here we create collectors ourselves and we need to prevent OOM because of a crazy offset and size.
topN = Math.min(topN, subSearchContext.searcher().getIndexReader().maxDoc());
TopDocsCollector<?> topLevelCollector = sort != null ? TopFieldCollector.create(sort, topN, true, subSearchContext.trackScores(), subSearchContext.trackScores()) : TopScoreDocCollector.create(topN);
collectors = new TopDocsAndLeafCollector(topLevelCollector);
collectors.leafCollector = collectors.topLevelCollector.getLeafCollector(ctx);

View File

@@ -19,6 +19,7 @@
package org.elasticsearch.search.aggregations.bucket;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.util.ArrayUtil;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchResponse;
@@ -928,4 +929,20 @@ public class TopHitsTests extends ElasticsearchIntegrationTest {
}
}
}
@Test
public void testDontExplode() throws Exception {
    // Ask for a top_hits size near Integer.MAX_VALUE; the aggregator is expected to
    // cap the collector size (see TopHitsAggregator, which clamps topN to maxDoc)
    // so the request completes without failures instead of attempting a huge allocation.
    SearchResponse searchResponse = client().prepareSearch("idx")
            .setTypes("type")
            .addAggregation(
                    terms("terms")
                            .executionHint(randomExecutionHint())
                            .field(TERMS_AGGS_FIELD)
                            .subAggregation(topHits("hits")
                                    .setSize(ArrayUtil.MAX_ARRAY_LENGTH - 1)
                                    .addSort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC))))
            .get();
    assertNoFailures(searchResponse);
}
}