Add an index setting to limit the maximum number of slices allowed in a scroll request (default to 1024).

This commit is contained in:
Jim Ferenczi 2016-06-08 15:28:06 +02:00
parent 7229c91289
commit 439b2a96e5
8 changed files with 126 additions and 13 deletions

View File

@ -116,6 +116,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
IndexSettings.ALLOW_UNMAPPED,
IndexSettings.INDEX_CHECK_ON_STARTUP,
IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD,
IndexSettings.MAX_SLICES_PER_SCROLL,
ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING,
IndexSettings.INDEX_GC_DELETES_SETTING,
IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING,

View File

@ -121,6 +121,12 @@ public final class IndexSettings {
public static final Setting<Integer> MAX_REFRESH_LISTENERS_PER_SHARD = Setting.intSetting("index.max_refresh_listeners", 1000, 0,
Property.Dynamic, Property.IndexScope);
/**
* The maximum number of slices allowed in a scroll request
*/
public static final Setting<Integer> MAX_SLICES_PER_SCROLL = Setting.intSetting("index.max_slices_per_scroll",
1024, 1, Property.Dynamic, Property.IndexScope);
private final Index index;
private final Version version;
private final ESLogger logger;
@ -154,6 +160,11 @@ public final class IndexSettings {
* The maximum number of refresh listeners allowed on this shard.
*/
private volatile int maxRefreshListeners;
/**
* The maximum number of slices allowed in a scroll request.
*/
private volatile int maxSlicesPerScroll;
/**
* Returns the default search field for this index.
@ -239,6 +250,7 @@ public final class IndexSettings {
maxRescoreWindow = scopedSettings.get(MAX_RESCORE_WINDOW_SETTING);
TTLPurgeDisabled = scopedSettings.get(INDEX_TTL_DISABLE_PURGE_SETTING);
maxRefreshListeners = scopedSettings.get(MAX_REFRESH_LISTENERS_PER_SHARD);
maxSlicesPerScroll = scopedSettings.get(MAX_SLICES_PER_SCROLL);
this.mergePolicyConfig = new MergePolicyConfig(logger, this);
assert indexNameMatcher.test(indexMetaData.getIndex().getName());
@ -262,6 +274,7 @@ public final class IndexSettings {
scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING, this::setTranslogFlushThresholdSize);
scopedSettings.addSettingsUpdateConsumer(INDEX_REFRESH_INTERVAL_SETTING, this::setRefreshInterval);
scopedSettings.addSettingsUpdateConsumer(MAX_REFRESH_LISTENERS_PER_SHARD, this::setMaxRefreshListeners);
scopedSettings.addSettingsUpdateConsumer(MAX_SLICES_PER_SCROLL, this::setMaxSlicesPerScroll);
}
private void setTranslogFlushThresholdSize(ByteSizeValue byteSizeValue) {
@ -521,5 +534,16 @@ public final class IndexSettings {
this.maxRefreshListeners = maxRefreshListeners;
}
/**
 * The maximum number of slices allowed in a scroll request.
 */
public int getMaxSlicesPerScroll() {
    return maxSlicesPerScroll;
}
/**
 * Updates the maximum number of slices allowed in a scroll request.
 * Registered as a dynamic settings-update consumer for
 * {@code index.max_slices_per_scroll}, so it may be invoked at runtime
 * when the index setting changes.
 */
private void setMaxSlicesPerScroll(int value) {
    this.maxSlicesPerScroll = value;
}
IndexScopedSettings getScopedSettings() { return scopedSettings;}
}

View File

@ -826,9 +826,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
if (context.scrollContext() == null) {
throw new SearchContextException(context, "`slice` cannot be used outside of a scroll context");
}
context.sliceFilter(source.slice().toFilter(queryShardContext,
context.shardTarget().getShardId().getId(),
queryShardContext.getIndexSettings().getNumberOfShards()));
context.sliceBuilder(source.slice());
}
}

View File

@ -68,6 +68,7 @@ import org.elasticsearch.search.profile.Profilers;
import org.elasticsearch.search.query.QueryPhaseExecutionException;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.rescore.RescoreSearchContext;
import org.elasticsearch.search.slice.SliceBuilder;
import org.elasticsearch.search.sort.SortAndFormats;
import org.elasticsearch.search.suggest.SuggestionSearchContext;
@ -116,7 +117,7 @@ public class DefaultSearchContext extends SearchContext {
private boolean trackScores = false; // when sorting, track scores as well...
private FieldDoc searchAfter;
// filter for sliced scroll
private Query sliceFilter;
private SliceBuilder sliceBuilder;
/**
* The original query as sent by the user without the types and aliases
@ -212,13 +213,23 @@ public class DefaultSearchContext extends SearchContext {
if (rescoreContext.window() > maxWindow) {
throw new QueryPhaseExecutionException(this, "Rescore window [" + rescoreContext.window() + "] is too large. It must "
+ "be less than [" + maxWindow + "]. This prevents allocating massive heaps for storing the results to be "
+ "rescored. This limit can be set by chaining the [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey()
+ "rescored. This limit can be set by changing the [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey()
+ "] index level setting.");
}
}
}
if (sliceBuilder != null) {
int sliceLimit = indexService.getIndexSettings().getMaxSlicesPerScroll();
int numSlices = sliceBuilder.getMax();
if (numSlices > sliceLimit) {
throw new QueryPhaseExecutionException(this, "The number of slices [" + numSlices + "] is too large. It must "
+ "be less than [" + sliceLimit + "]. This limit can be set by changing the [" +
IndexSettings.MAX_SLICES_PER_SCROLL.getKey() + "] index level setting.");
}
}
// initialize the filtering alias based on the provided filters
aliasFilter = indexService.aliasFilter(queryShardContext, request.filteringAliases());
@ -257,9 +268,11 @@ public class DefaultSearchContext extends SearchContext {
@Nullable
public Query searchFilter(String[] types) {
Query typesFilter = createSearchFilter(types, aliasFilter, mapperService().hasNested());
if (sliceFilter == null) {
if (sliceBuilder == null) {
return typesFilter;
}
Query sliceFilter = sliceBuilder.toFilter(queryShardContext, shardTarget().getShardId().getId(),
queryShardContext.getIndexSettings().getNumberOfShards());
if (typesFilter == null) {
return sliceFilter;
}
@ -562,8 +575,8 @@ public class DefaultSearchContext extends SearchContext {
return searchAfter;
}
public SearchContext sliceFilter(Query filter) {
this.sliceFilter = filter;
public SearchContext sliceBuilder(SliceBuilder sliceBuilder) {
this.sliceBuilder = sliceBuilder;
return this;
}

View File

@ -443,6 +443,6 @@ public class SimpleSearchIT extends ESIntegTestCase {
assertThat(e.toString(), containsString("Rescore window [" + windowSize + "] is too large. It must "
+ "be less than [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY)));
assertThat(e.toString(), containsString(
"This limit can be set by chaining the [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey() + "] index level setting."));
"This limit can be set by changing the [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey() + "] index level setting."));
}
}

View File

@ -35,7 +35,6 @@ import org.elasticsearch.test.ESIntegTestCase;
import java.io.IOException;
import java.util.List;
import java.util.ArrayList;
import java.util.Set;
import java.util.HashSet;
import java.util.concurrent.ExecutionException;
@ -43,7 +42,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.startsWith;
public class SearchSliceIT extends ESIntegTestCase {
@ -71,7 +69,8 @@ public class SearchSliceIT extends ESIntegTestCase {
.endObject().string();
int numberOfShards = randomIntBetween(1, 7);
assertAcked(client().admin().indices().prepareCreate("test")
.setSettings("number_of_shards", numberOfShards)
.setSettings("number_of_shards", numberOfShards,
"index.max_slices_per_scroll", 10000)
.addMapping("type", mapping));
ensureGreen();

View File

@ -263,4 +263,7 @@ curl -XGET 'localhost:9200/twitter/tweet/_search?scroll=1m' -d '
'
--------------------------------------------------
For append only time-based indices, the `timestamp` field can be used safely.
NOTE: By default the maximum number of slices allowed per scroll is limited to 1024.
You can update the `index.max_slices_per_scroll` index setting to raise this limit.

View File

@ -0,0 +1,75 @@
---
"Sliced scroll":
- do:
indices.create:
index: test_sliced_scroll
- do:
index:
index: test_sliced_scroll
type: test
id: 42
body: { foo: 1 }
- do:
indices.refresh: {}
- do:
search:
index: test_sliced_scroll
size: 1
scroll: 1m
sort: foo
body:
slice: {
id: 0,
max: 3
}
query:
match_all: {}
- set: {_scroll_id: scroll_id}
- do:
clear_scroll:
scroll_id: $scroll_id
- do:
catch: /query_phase_execution_exception.*The number of slices.*index.max_slices_per_scroll/
search:
index: test_sliced_scroll
size: 1
scroll: 1m
body:
slice: {
id: 0,
max: 1025
}
query:
match_all: {}
- do:
indices.put_settings:
index: test_sliced_scroll
body:
index.max_slices_per_scroll: 1025
- do:
search:
index: test_sliced_scroll
size: 1
scroll: 1m
body:
slice: {
id: 0,
max: 1025
}
query:
match_all: {}
- set: {_scroll_id: scroll_id}
- do:
clear_scroll:
scroll_id: $scroll_id