Raise IllegalArgumentException if query validation failed (#26811)

Closes #26799
kel 2017-10-31 06:17:27 -05:00 committed by Luca Cavanna
parent a4c159e91e
commit c3e2bdf20c
9 changed files with 286 additions and 24 deletions
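
Why the exception swap changes the client-visible status code (editor's note, not part of the commit): the REST layer derives the HTTP status from the unwrapped failure cause via ExceptionsHelper.status, which maps IllegalArgumentException to 400 Bad Request, whereas QueryPhaseExecutionException is an ElasticsearchException that reported these validation failures as 500. A minimal sketch (the class name StatusSketch is illustrative, not from the codebase):

    import org.elasticsearch.ExceptionsHelper;
    import org.elasticsearch.rest.RestStatus;

    final class StatusSketch {
        // IllegalArgumentException -> RestStatus.BAD_REQUEST (400); an
        // ElasticsearchException such as QueryPhaseExecutionException reports
        // its own status, which was INTERNAL_SERVER_ERROR (500) for these
        // validation failures.
        static RestStatus statusOf(Exception e) {
            return ExceptionsHelper.status(ExceptionsHelper.unwrapCause(e));
        }
    }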


@@ -24,7 +24,6 @@ import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.FieldDoc;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.Counter;
 import org.elasticsearch.action.search.SearchTask;
 import org.elasticsearch.action.search.SearchType;
@@ -81,7 +80,6 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.ExecutorService;

 final class DefaultSearchContext extends SearchContext {
@@ -200,29 +198,28 @@ final class DefaultSearchContext extends SearchContext {
         if (resultWindow > maxResultWindow) {
             if (scrollContext == null) {
-                throw new QueryPhaseExecutionException(this,
+                throw new IllegalArgumentException(
                     "Result window is too large, from + size must be less than or equal to: [" + maxResultWindow + "] but was ["
                         + resultWindow + "]. See the scroll api for a more efficient way to request large data sets. "
                         + "This limit can be set by changing the [" + IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey()
                         + "] index level setting.");
             }
-            throw new QueryPhaseExecutionException(this,
+            throw new IllegalArgumentException(
                 "Batch size is too large, size must be less than or equal to: [" + maxResultWindow + "] but was [" + resultWindow
                     + "]. Scroll batch sizes cost as much memory as result windows so they are controlled by the ["
                     + IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey() + "] index level setting.");
         }
         if (rescore != null) {
             if (sort != null) {
-                throw new QueryPhaseExecutionException(this, "Cannot use [sort] option in conjunction with [rescore].");
+                throw new IllegalArgumentException("Cannot use [sort] option in conjunction with [rescore].");
             }
             int maxWindow = indexService.getIndexSettings().getMaxRescoreWindow();
             for (RescoreContext rescoreContext: rescore) {
                 if (rescoreContext.getWindowSize() > maxWindow) {
-                    throw new QueryPhaseExecutionException(this, "Rescore window [" + rescoreContext.getWindowSize() + "] is too large. "
+                    throw new IllegalArgumentException("Rescore window [" + rescoreContext.getWindowSize() + "] is too large. "
                         + "It must be less than [" + maxWindow + "]. This prevents allocating massive heaps for storing the results "
                         + "to be rescored. This limit can be set by changing the [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey()
                         + "] index level setting.");
                 }
             }
         }
@@ -231,7 +228,7 @@ final class DefaultSearchContext extends SearchContext {
         int sliceLimit = indexService.getIndexSettings().getMaxSlicesPerScroll();
         int numSlices = sliceBuilder.getMax();
         if (numSlices > sliceLimit) {
-            throw new QueryPhaseExecutionException(this, "The number of slices [" + numSlices + "] is too large. It must "
+            throw new IllegalArgumentException("The number of slices [" + numSlices + "] is too large. It must "
                 + "be less than [" + sliceLimit + "]. This limit can be set by changing the [" +
                 IndexSettings.MAX_SLICES_PER_SCROLL.getKey() + "] index level setting.");
         }


@@ -650,7 +650,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
     private void contextScrollKeepAlive(SearchContext context, long keepAlive) throws IOException {
         if (keepAlive > maxKeepAlive) {
-            throw new QueryPhaseExecutionException(context,
+            throw new IllegalArgumentException(
                 "Keep alive for scroll (" + TimeValue.timeValueMillis(keepAlive).format() + ") is too large. " +
                 "It must be less than (" + TimeValue.timeValueMillis(maxKeepAlive).format() + "). " +
                 "This limit can be set by changing the [" + MAX_KEEPALIVE_SETTING.getKey() + "] cluster level setting.");


@@ -189,7 +189,7 @@ public class AdjacencyMatrixAggregationBuilder extends AbstractAggregationBuilde
             throws IOException {
         int maxFilters = context.indexShard().indexSettings().getMaxAdjacencyMatrixFilters();
         if (filters.size() > maxFilters){
-            throw new QueryPhaseExecutionException(context,
+            throw new IllegalArgumentException(
                 "Number of filters is too large, must be less than or equal to: [" + maxFilters + "] but was ["
                     + filters.size() + "]."
                     + "This limit can be set by changing the [" + IndexSettings.MAX_ADJACENCY_MATRIX_FILTERS_SETTING.getKey()


@@ -0,0 +1,178 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.Sort;
import org.apache.lucene.store.Directory;
import org.elasticsearch.Version;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.MockBigArrays;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.cache.IndexCache;
import org.elasticsearch.index.cache.query.QueryCache;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.AbstractQueryBuilder;
import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.search.internal.ScrollContext;
import org.elasticsearch.search.internal.ShardSearchRequest;
import org.elasticsearch.search.rescore.RescoreContext;
import org.elasticsearch.search.slice.SliceBuilder;
import org.elasticsearch.search.sort.SortAndFormats;
import org.elasticsearch.test.ESTestCase;
import java.util.UUID;
import static org.hamcrest.Matchers.equalTo;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Matchers.anyString;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class DefaultSearchContextTests extends ESTestCase {

    public void testPreProcess() throws Exception {
        TimeValue timeout = new TimeValue(randomIntBetween(1, 100));
        ShardSearchRequest shardSearchRequest = mock(ShardSearchRequest.class);
        when(shardSearchRequest.searchType()).thenReturn(SearchType.DEFAULT);
        ShardId shardId = new ShardId("index", UUID.randomUUID().toString(), 1);
        when(shardSearchRequest.shardId()).thenReturn(shardId);
        when(shardSearchRequest.types()).thenReturn(new String[]{});

        IndexShard indexShard = mock(IndexShard.class);
        QueryCachingPolicy queryCachingPolicy = mock(QueryCachingPolicy.class);
        when(indexShard.getQueryCachingPolicy()).thenReturn(queryCachingPolicy);

        int maxResultWindow = randomIntBetween(50, 100);
        int maxRescoreWindow = randomIntBetween(50, 100);
        int maxSlicesPerScroll = randomIntBetween(50, 100);
        Settings settings = Settings.builder()
            .put("index.max_result_window", maxResultWindow)
            .put("index.max_slices_per_scroll", maxSlicesPerScroll)
            .put("index.max_rescore_window", maxRescoreWindow)
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2)
            .build();

        IndexService indexService = mock(IndexService.class);
        IndexCache indexCache = mock(IndexCache.class);
        QueryCache queryCache = mock(QueryCache.class);
        when(indexCache.query()).thenReturn(queryCache);
        when(indexService.cache()).thenReturn(indexCache);
        QueryShardContext queryShardContext = mock(QueryShardContext.class);
        when(indexService.newQueryShardContext(eq(shardId.id()), anyObject(), anyObject(), anyString())).thenReturn(queryShardContext);
        MapperService mapperService = mock(MapperService.class);
        when(mapperService.hasNested()).thenReturn(randomBoolean());
        when(indexService.mapperService()).thenReturn(mapperService);

        IndexMetaData indexMetaData = IndexMetaData.builder("index").settings(settings).build();
        IndexSettings indexSettings = new IndexSettings(indexMetaData, Settings.EMPTY);
        when(indexService.getIndexSettings()).thenReturn(indexSettings);

        BigArrays bigArrays = new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService());

        try (Directory dir = newDirectory();
             RandomIndexWriter w = new RandomIndexWriter(random(), dir);
             IndexReader reader = w.getReader();
             Engine.Searcher searcher = new Engine.Searcher("test", new IndexSearcher(reader))) {

            DefaultSearchContext context1 = new DefaultSearchContext(1L, shardSearchRequest, null, searcher, indexService,
                indexShard, bigArrays, null, timeout, null, null);
            context1.from(300);

            // resultWindow greater than maxResultWindow and scrollContext is null
            IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> context1.preProcess(false));
            assertThat(exception.getMessage(), equalTo("Result window is too large, from + size must be less than or equal to:"
                + " [" + maxResultWindow + "] but was [310]. See the scroll api for a more efficient way to request large data sets. "
                + "This limit can be set by changing the [" + IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey()
                + "] index level setting."));

            // resultWindow greater than maxResultWindow and scrollContext isn't null
            context1.scrollContext(new ScrollContext());
            exception = expectThrows(IllegalArgumentException.class, () -> context1.preProcess(false));
            assertThat(exception.getMessage(), equalTo("Batch size is too large, size must be less than or equal to: ["
                + maxResultWindow + "] but was [310]. Scroll batch sizes cost as much memory as result windows so they are "
                + "controlled by the [" + IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey() + "] index level setting."));

            // resultWindow not greater than maxResultWindow and both rescore and sort are not null
            context1.from(0);
            DocValueFormat docValueFormat = mock(DocValueFormat.class);
            SortAndFormats sortAndFormats = new SortAndFormats(new Sort(), new DocValueFormat[]{docValueFormat});
            context1.sort(sortAndFormats);
            RescoreContext rescoreContext = mock(RescoreContext.class);
            when(rescoreContext.getWindowSize()).thenReturn(500);
            context1.addRescore(rescoreContext);
            exception = expectThrows(IllegalArgumentException.class, () -> context1.preProcess(false));
            assertThat(exception.getMessage(), equalTo("Cannot use [sort] option in conjunction with [rescore]."));

            // sort is null but rescore is not null and rescoreContext.getWindowSize() exceeds maxRescoreWindow
            context1.sort(null);
            exception = expectThrows(IllegalArgumentException.class, () -> context1.preProcess(false));
            assertThat(exception.getMessage(), equalTo("Rescore window [" + rescoreContext.getWindowSize() + "] is too large. "
                + "It must be less than [" + maxRescoreWindow + "]. This prevents allocating massive heaps for storing the results "
                + "to be rescored. This limit can be set by changing the [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey()
                + "] index level setting."));

            // rescore is null but sliceBuilder is not null
            DefaultSearchContext context2 = new DefaultSearchContext(2L, shardSearchRequest, null, searcher, indexService,
                indexShard, bigArrays, null, timeout, null, null);
            SliceBuilder sliceBuilder = mock(SliceBuilder.class);
            int numSlices = maxSlicesPerScroll + randomIntBetween(1, 100);
            when(sliceBuilder.getMax()).thenReturn(numSlices);
            context2.sliceBuilder(sliceBuilder);
            exception = expectThrows(IllegalArgumentException.class, () -> context2.preProcess(false));
            assertThat(exception.getMessage(), equalTo("The number of slices [" + numSlices + "] is too large. It must "
                + "be less than [" + maxSlicesPerScroll + "]. This limit can be set by changing the [" +
                IndexSettings.MAX_SLICES_PER_SCROLL.getKey() + "] index level setting."));

            // No exceptions should be thrown
            when(shardSearchRequest.getAliasFilter()).thenReturn(AliasFilter.EMPTY);
            when(shardSearchRequest.indexBoost()).thenReturn(AbstractQueryBuilder.DEFAULT_BOOST);
            DefaultSearchContext context3 = new DefaultSearchContext(3L, shardSearchRequest, null, searcher, indexService,
                indexShard, bigArrays, null, timeout, null, null);
            ParsedQuery parsedQuery = ParsedQuery.parsedMatchAllQuery();
            context3.sliceBuilder(null).parsedQuery(parsedQuery).preProcess(false);
            assertEquals(context3.query(), context3.buildFilteredQuery(parsedQuery.query()));
        }
    }
}


@@ -0,0 +1,84 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket.adjacency;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.TestSearchContext;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class AdjacencyMatrixAggregationBuilderTests extends ESTestCase {

    public void testFilterSizeLimitation() throws Exception {
        // a filter count greater than the max should throw an exception
        QueryShardContext queryShardContext = mock(QueryShardContext.class);
        IndexShard indexShard = mock(IndexShard.class);
        Settings settings = Settings.builder()
            .put("index.max_adjacency_matrix_filters", 2)
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2)
            .build();
        IndexMetaData indexMetaData = IndexMetaData.builder("index").settings(settings).build();
        IndexSettings indexSettings = new IndexSettings(indexMetaData, Settings.EMPTY);
        when(indexShard.indexSettings()).thenReturn(indexSettings);
        SearchContext context = new TestSearchContext(queryShardContext, indexShard);

        Map<String, QueryBuilder> filters = new HashMap<>(3);
        for (int i = 0; i < 3; i++) {
            QueryBuilder queryBuilder = mock(QueryBuilder.class);
            // return the builder itself so the rewrite step is a no-op
            when(queryBuilder.rewrite(queryShardContext)).thenReturn(queryBuilder);
            filters.put("filter" + i, queryBuilder);
        }
        AdjacencyMatrixAggregationBuilder builder = new AdjacencyMatrixAggregationBuilder("dummy", filters);
        IllegalArgumentException ex
            = expectThrows(IllegalArgumentException.class, () -> builder.doBuild(context, null, new AggregatorFactories.Builder()));
        assertThat(ex.getMessage(), equalTo("Number of filters is too large, must be less than or equal to: [2] but was [3]."
            + "This limit can be set by changing the [" + IndexSettings.MAX_ADJACENCY_MATRIX_FILTERS_SETTING.getKey()
            + "] index level setting."));

        // a filter count within the limit should produce an AdjacencyMatrixAggregatorFactory
        Map<String, QueryBuilder> emptyFilters = Collections.emptyMap();
        AdjacencyMatrixAggregationBuilder aggregationBuilder = new AdjacencyMatrixAggregationBuilder("dummy", emptyFilters);
        AggregatorFactory<?> factory = aggregationBuilder.doBuild(context, null, new AggregatorFactories.Builder());
        assertThat(factory instanceof AdjacencyMatrixAggregatorFactory, is(true));
        assertThat(factory.name(), equalTo("dummy"));
    }
}


@@ -19,7 +19,6 @@
 package org.elasticsearch.search.scroll;

-import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.search.ClearScrollResponse;
 import org.elasticsearch.action.search.SearchRequestBuilder;
@@ -37,7 +36,6 @@ import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.search.SearchHit;
-import org.elasticsearch.search.query.QueryPhaseExecutionException;
 import org.elasticsearch.search.sort.FieldSortBuilder;
 import org.elasticsearch.search.sort.SortOrder;
 import org.elasticsearch.test.ESIntegTestCase;
@@ -575,10 +573,10 @@ public class SearchScrollIT extends ESIntegTestCase {
                 .setSize(1)
                 .setScroll(TimeValue.timeValueHours(2))
                 .execute().actionGet());
-        QueryPhaseExecutionException queryPhaseExecutionException =
-            (QueryPhaseExecutionException) ExceptionsHelper.unwrap(exc, QueryPhaseExecutionException.class);
-        assertNotNull(queryPhaseExecutionException);
-        assertThat(queryPhaseExecutionException.getMessage(), containsString("Keep alive for scroll (2 hours) is too large"));
+        IllegalArgumentException illegalArgumentException =
+            (IllegalArgumentException) ExceptionsHelper.unwrap(exc, IllegalArgumentException.class);
+        assertNotNull(illegalArgumentException);
+        assertThat(illegalArgumentException.getMessage(), containsString("Keep alive for scroll (2 hours) is too large"));

         SearchResponse searchResponse = client().prepareSearch()
             .setQuery(matchAllQuery())
@@ -592,10 +590,10 @@ public class SearchScrollIT extends ESIntegTestCase {
         exc = expectThrows(Exception.class,
             () -> client().prepareSearchScroll(searchResponse.getScrollId())
                 .setScroll(TimeValue.timeValueHours(3)).get());
-        queryPhaseExecutionException =
-            (QueryPhaseExecutionException) ExceptionsHelper.unwrap(exc, QueryPhaseExecutionException.class);
-        assertNotNull(queryPhaseExecutionException);
-        assertThat(queryPhaseExecutionException.getMessage(), containsString("Keep alive for scroll (3 hours) is too large"));
+        illegalArgumentException =
+            (IllegalArgumentException) ExceptionsHelper.unwrap(exc, IllegalArgumentException.class);
+        assertNotNull(illegalArgumentException);
+        assertThat(illegalArgumentException.getMessage(), containsString("Keep alive for scroll (3 hours) is too large"));
     }

     private void assertToXContentResponse(ClearScrollResponse response, boolean succeed, int numFreed) throws IOException {


@@ -22,3 +22,7 @@ PUT /_cluster/settings
 --------------------------------------------------
 // CONSOLE
+
+=== `_search/scroll` returns `400` for invalid requests
+The `/_search/scroll` endpoint returns `400 - Bad Request` when the request is invalid, where it would previously
+return `500 - Internal Server Error` in such cases.
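
As an illustration of the new behavior (editor's sketch, not part of the commit's doc change; the index name and the default `index.max_slices_per_scroll` limit of 1024 are assumptions), a sliced scroll request that exceeds the slice limit now fails with `400` and an `illegal_argument_exception` rather than `500`:

[source,js]
--------------------------------------------------
GET /twitter/_search?scroll=1m
{
  "slice": {
    "id": 0,
    "max": 1025
  },
  "query": {
    "match_all": {}
  }
}
--------------------------------------------------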


@@ -103,8 +103,12 @@ setup:
 ---
 "Sliced scroll with invalid arguments":
+  - skip:
+      version: " - 6.99.99"
+      reason: Prior versions return 500 rather than 400
+
   - do:
-      catch: /query_phase_execution_exception.*The number of slices.*index.max_slices_per_scroll/
+      catch: bad_request
       search:
         index: test_sliced_scroll
         size: 1


@@ -24,15 +24,12 @@ import org.apache.lucene.search.Query;
 import org.apache.lucene.util.Counter;
 import org.elasticsearch.action.search.SearchTask;
 import org.elasticsearch.action.search.SearchType;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.BigArrays;
-import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.fielddata.IndexFieldData;
-import org.elasticsearch.index.fielddata.IndexFieldDataService;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.ObjectMapper;