Simplify number of shards setting (#30783)

This is code that was leftover from the move to one shard by
default. Here in index metadata we were preserving the default number of
shards settings independently of the area of code where we set this
value on an index that does not explicitly have a number of shards
setting. This took into consideration the es.index.max_number_of_shards
system property, and was used in search requests to set the default
maximum number of concurrent shard requests. We set the default there
based on the default number of shards so that in a one-node case a
search request could concurrently hit all shards on an index with the
defaults. Now that we default to one shard, we expect fewer shards in
clusters and this adjustment of the node count as the max number of
concurrent shard requests is no longer needed. This commit then changes
the default number of shards settings to be consistent with the value
used when an index is created, and removes the now unneeded adjustment
in search requests.
This commit is contained in:
Jason Tedor 2018-05-22 14:33:16 -04:00 committed by GitHub
parent a17d6cab98
commit 2984734197
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 7 additions and 9 deletions

View File

@ -341,13 +341,12 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
return searchTransportService.getConnection(clusterName, discoveryNode); return searchTransportService.getConnection(clusterName, discoveryNode);
}; };
if (searchRequest.isMaxConcurrentShardRequestsSet() == false) { if (searchRequest.isMaxConcurrentShardRequestsSet() == false) {
// we try to set a default of max concurrent shard requests based on /*
// the node count but upper-bound it by 256 by default to keep it sane. A single * We try to set a default of max concurrent shard requests based on the node count but upper-bound it by 256 by default to keep
// search request that fans out lots of shards should hit a cluster too hard while 256 is already a lot. * it sane. A single search request that fans out to lots of shards should not hit a cluster too hard while 256 is already a
// we multiply it by the default number of shards such that a single request in a cluster of 1 would hit all shards of a * lot.
// default index. */
searchRequest.setMaxConcurrentShardRequests(Math.min(256, nodeCount searchRequest.setMaxConcurrentShardRequests(Math.min(256, nodeCount));
* IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getDefault(Settings.EMPTY)));
} }
boolean preFilterSearchShards = shouldPreFilterSearchShards(searchRequest, shardIterators); boolean preFilterSearchShards = shouldPreFilterSearchShards(searchRequest, shardIterators);
searchAsyncAction(task, searchRequest, shardIterators, timeProvider, connectionLookup, clusterState.version(), searchAsyncAction(task, searchRequest, shardIterators, timeProvider, connectionLookup, clusterState.version(),

View File

@ -181,8 +181,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
if (maxNumShards < 1) { if (maxNumShards < 1) {
throw new IllegalArgumentException("es.index.max_number_of_shards must be > 0"); throw new IllegalArgumentException("es.index.max_number_of_shards must be > 0");
} }
return Setting.intSetting(SETTING_NUMBER_OF_SHARDS, Math.min(5, maxNumShards), 1, maxNumShards, return Setting.intSetting(SETTING_NUMBER_OF_SHARDS, 1, 1, maxNumShards, Property.IndexScope, Property.Final);
Property.IndexScope, Property.Final);
} }
public static final String INDEX_SETTING_PREFIX = "index."; public static final String INDEX_SETTING_PREFIX = "index.";