Simplify mapping validation for resizing indices. (#58514)
When creating a target index from a source index, we don't allow target mappings to be specified. This PR simplifies the check that the target mappings are empty. This refactor will help when implementing composable template merging, since we no longer need to resolve and check the target mappings when creating an index from a template.
This commit is contained in:
parent
add8ff1ad3
commit
1f2e05c947
|
@ -475,7 +475,7 @@ public class MetadataCreateIndexService {
|
|||
xContentRegistry));
|
||||
|
||||
final Settings aggregatedIndexSettings =
|
||||
aggregateIndexSettings(currentState, request, MetadataIndexTemplateService.resolveSettings(templates), mappings,
|
||||
aggregateIndexSettings(currentState, request, MetadataIndexTemplateService.resolveSettings(templates),
|
||||
null, settings, indexScopedSettings, shardLimitValidator);
|
||||
int routingNumShards = getIndexNumberOfRoutingShards(aggregatedIndexSettings, null);
|
||||
IndexMetadata tmpImd = buildAndValidateTemporaryIndexMetadata(currentState, aggregatedIndexSettings, request, routingNumShards);
|
||||
|
@ -519,7 +519,7 @@ public class MetadataCreateIndexService {
|
|||
final Settings aggregatedIndexSettings =
|
||||
aggregateIndexSettings(currentState, request,
|
||||
MetadataIndexTemplateService.resolveSettings(currentState.metadata(), templateName),
|
||||
mappings, null, settings, indexScopedSettings, shardLimitValidator);
|
||||
null, settings, indexScopedSettings, shardLimitValidator);
|
||||
int routingNumShards = getIndexNumberOfRoutingShards(aggregatedIndexSettings, null);
|
||||
IndexMetadata tmpImd = buildAndValidateTemporaryIndexMetadata(currentState, aggregatedIndexSettings, request, routingNumShards);
|
||||
|
||||
|
@ -549,22 +549,17 @@ public class MetadataCreateIndexService {
|
|||
throws Exception {
|
||||
logger.info("applying create index request using existing index [{}] metadata", sourceMetadata.getIndex().getName());
|
||||
|
||||
final Map<String, Map<String, Object>> mappings;
|
||||
if (request.mappings().size() == 0) {
|
||||
mappings = Collections.emptyMap();
|
||||
} else {
|
||||
assert request.mappings().size() == 1 : "expected source metadata mappings to have 1 type but it had: " + request.mappings();
|
||||
String sourceMappings = request.mappings().values().iterator().next();
|
||||
mappings = Collections.singletonMap(MapperService.SINGLE_MAPPING_NAME,
|
||||
Collections.unmodifiableMap(MapperService.parseMapping(xContentRegistry, sourceMappings)));
|
||||
if (request.mappings().size() > 0) {
|
||||
throw new IllegalArgumentException("mappings are not allowed when creating an index from a source index, " +
|
||||
"all mappings are copied from the source index");
|
||||
}
|
||||
|
||||
final Settings aggregatedIndexSettings = aggregateIndexSettings(currentState, request, Settings.EMPTY, mappings, sourceMetadata,
|
||||
settings, indexScopedSettings, shardLimitValidator);
|
||||
final Settings aggregatedIndexSettings = aggregateIndexSettings(currentState, request, Settings.EMPTY,
|
||||
sourceMetadata, settings, indexScopedSettings, shardLimitValidator);
|
||||
final int routingNumShards = getIndexNumberOfRoutingShards(aggregatedIndexSettings, sourceMetadata);
|
||||
IndexMetadata tmpImd = buildAndValidateTemporaryIndexMetadata(currentState, aggregatedIndexSettings, request, routingNumShards);
|
||||
|
||||
return applyCreateIndexWithTemporaryService(currentState, request, silent, sourceMetadata, tmpImd, mappings,
|
||||
return applyCreateIndexWithTemporaryService(currentState, request, silent, sourceMetadata, tmpImd, Collections.emptyMap(),
|
||||
indexService -> resolveAndValidateAliases(request.index(), request.aliases(), Collections.emptyList(),
|
||||
currentState.metadata(), aliasValidator, xContentRegistry,
|
||||
// the context is only used for validation so it's fine to pass fake values for the
|
||||
|
@ -726,8 +721,7 @@ public class MetadataCreateIndexService {
|
|||
* @return the aggregated settings for the new index
|
||||
*/
|
||||
static Settings aggregateIndexSettings(ClusterState currentState, CreateIndexClusterStateUpdateRequest request,
|
||||
Settings templateSettings, Map<String, Map<String, Object>> mappings,
|
||||
@Nullable IndexMetadata sourceMetadata, Settings settings,
|
||||
Settings templateSettings, @Nullable IndexMetadata sourceMetadata, Settings settings,
|
||||
IndexScopedSettings indexScopedSettings, ShardLimitValidator shardLimitValidator) {
|
||||
Settings.Builder indexSettingsBuilder = Settings.builder();
|
||||
if (sourceMetadata == null) {
|
||||
|
@ -766,7 +760,6 @@ public class MetadataCreateIndexService {
|
|||
assert request.resizeType() != null;
|
||||
prepareResizeIndexSettings(
|
||||
currentState,
|
||||
mappings.keySet(),
|
||||
indexSettingsBuilder,
|
||||
request.recoverFrom(),
|
||||
request.index(),
|
||||
|
@ -1077,10 +1070,8 @@ public class MetadataCreateIndexService {
|
|||
*
|
||||
* @return the list of nodes at least one instance of the source index shards are allocated
|
||||
*/
|
||||
static List<String> validateShrinkIndex(ClusterState state, String sourceIndex,
|
||||
Set<String> targetIndexMappingsTypes, String targetIndexName,
|
||||
Settings targetIndexSettings) {
|
||||
IndexMetadata sourceMetadata = validateResize(state, sourceIndex, targetIndexMappingsTypes, targetIndexName, targetIndexSettings);
|
||||
static List<String> validateShrinkIndex(ClusterState state, String sourceIndex, String targetIndexName, Settings targetIndexSettings) {
|
||||
IndexMetadata sourceMetadata = validateResize(state, sourceIndex, targetIndexName, targetIndexSettings);
|
||||
assert INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings);
|
||||
IndexMetadata.selectShrinkShards(0, sourceMetadata, INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings));
|
||||
|
||||
|
@ -1110,10 +1101,8 @@ public class MetadataCreateIndexService {
|
|||
return nodesToAllocateOn;
|
||||
}
|
||||
|
||||
static void validateSplitIndex(ClusterState state, String sourceIndex,
|
||||
Set<String> targetIndexMappingsTypes, String targetIndexName,
|
||||
Settings targetIndexSettings) {
|
||||
IndexMetadata sourceMetadata = validateResize(state, sourceIndex, targetIndexMappingsTypes, targetIndexName, targetIndexSettings);
|
||||
static void validateSplitIndex(ClusterState state, String sourceIndex, String targetIndexName, Settings targetIndexSettings) {
|
||||
IndexMetadata sourceMetadata = validateResize(state, sourceIndex, targetIndexName, targetIndexSettings);
|
||||
IndexMetadata.selectSplitShard(0, sourceMetadata, IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings));
|
||||
if (sourceMetadata.getCreationVersion().before(Version.V_6_0_0_alpha1)) {
|
||||
// ensure we have a single type since this would make the splitting code considerably more complex
|
||||
|
@ -1123,16 +1112,12 @@ public class MetadataCreateIndexService {
|
|||
}
|
||||
}
|
||||
|
||||
static void validateCloneIndex(ClusterState state, String sourceIndex,
|
||||
Set<String> targetIndexMappingsTypes, String targetIndexName,
|
||||
Settings targetIndexSettings) {
|
||||
IndexMetadata sourceMetadata = validateResize(state, sourceIndex, targetIndexMappingsTypes, targetIndexName, targetIndexSettings);
|
||||
static void validateCloneIndex(ClusterState state, String sourceIndex, String targetIndexName, Settings targetIndexSettings) {
|
||||
IndexMetadata sourceMetadata = validateResize(state, sourceIndex, targetIndexName, targetIndexSettings);
|
||||
IndexMetadata.selectCloneShard(0, sourceMetadata, INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings));
|
||||
}
|
||||
|
||||
static IndexMetadata validateResize(ClusterState state, String sourceIndex,
|
||||
Set<String> targetIndexMappingsTypes, String targetIndexName,
|
||||
Settings targetIndexSettings) {
|
||||
static IndexMetadata validateResize(ClusterState state, String sourceIndex, String targetIndexName, Settings targetIndexSettings) {
|
||||
if (state.metadata().hasIndex(targetIndexName)) {
|
||||
throw new ResourceAlreadyExistsException(state.metadata().index(targetIndexName).getIndex());
|
||||
}
|
||||
|
@ -1154,12 +1139,6 @@ public class MetadataCreateIndexService {
|
|||
throw new IllegalStateException("index " + sourceIndex + " must be read-only to resize index. use \"index.blocks.write=true\"");
|
||||
}
|
||||
|
||||
if ((targetIndexMappingsTypes.size() > 1 ||
|
||||
(targetIndexMappingsTypes.isEmpty() || targetIndexMappingsTypes.contains(MapperService.DEFAULT_MAPPING)) == false)) {
|
||||
throw new IllegalArgumentException("mappings are not allowed when resizing indices" +
|
||||
", all mappings are copied from the source index");
|
||||
}
|
||||
|
||||
if (INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings)) {
|
||||
// this method applies all necessary checks ie. if the target shards are less than the source shards
|
||||
// of if the source shards are divisible by the number of target shards
|
||||
|
@ -1171,14 +1150,12 @@ public class MetadataCreateIndexService {
|
|||
|
||||
static void prepareResizeIndexSettings(
|
||||
final ClusterState currentState,
|
||||
final Set<String> mappingKeys,
|
||||
final Settings.Builder indexSettingsBuilder,
|
||||
final Index resizeSourceIndex,
|
||||
final String resizeIntoName,
|
||||
final ResizeType type,
|
||||
final boolean copySettings,
|
||||
final IndexScopedSettings indexScopedSettings) {
|
||||
|
||||
// we use "i.r.a.initial_recovery" rather than "i.r.a.require|include" since we want the replica to allocate right away
|
||||
// once we are allocated.
|
||||
final String initialRecoveryIdFilter = IndexMetadata.INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING.getKey() + "_id";
|
||||
|
@ -1186,13 +1163,13 @@ public class MetadataCreateIndexService {
|
|||
final IndexMetadata sourceMetadata = currentState.metadata().index(resizeSourceIndex.getName());
|
||||
if (type == ResizeType.SHRINK) {
|
||||
final List<String> nodesToAllocateOn = validateShrinkIndex(currentState, resizeSourceIndex.getName(),
|
||||
mappingKeys, resizeIntoName, indexSettingsBuilder.build());
|
||||
resizeIntoName, indexSettingsBuilder.build());
|
||||
indexSettingsBuilder.put(initialRecoveryIdFilter, Strings.arrayToCommaDelimitedString(nodesToAllocateOn.toArray()));
|
||||
} else if (type == ResizeType.SPLIT) {
|
||||
validateSplitIndex(currentState, resizeSourceIndex.getName(), mappingKeys, resizeIntoName, indexSettingsBuilder.build());
|
||||
validateSplitIndex(currentState, resizeSourceIndex.getName(), resizeIntoName, indexSettingsBuilder.build());
|
||||
indexSettingsBuilder.putNull(initialRecoveryIdFilter);
|
||||
} else if (type == ResizeType.CLONE) {
|
||||
validateCloneIndex(currentState, resizeSourceIndex.getName(), mappingKeys, resizeIntoName, indexSettingsBuilder.build());
|
||||
validateCloneIndex(currentState, resizeSourceIndex.getName(), resizeIntoName, indexSettingsBuilder.build());
|
||||
indexSettingsBuilder.putNull(initialRecoveryIdFilter);
|
||||
} else {
|
||||
throw new IllegalStateException("unknown resize type is " + type);
|
||||
|
|
|
@ -188,52 +188,44 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
|
|||
|
||||
assertEquals("index [source] already exists",
|
||||
expectThrows(ResourceAlreadyExistsException.class, () ->
|
||||
MetadataCreateIndexService.validateShrinkIndex(state, "target", Collections.emptySet(), "source", Settings.EMPTY)
|
||||
MetadataCreateIndexService.validateShrinkIndex(state, "target", "source", Settings.EMPTY)
|
||||
).getMessage());
|
||||
|
||||
assertEquals("no such index [no_such_index]",
|
||||
expectThrows(IndexNotFoundException.class, () ->
|
||||
MetadataCreateIndexService.validateShrinkIndex(state, "no_such_index", Collections.emptySet(), "target", Settings.EMPTY)
|
||||
MetadataCreateIndexService.validateShrinkIndex(state, "no_such_index", "target", Settings.EMPTY)
|
||||
).getMessage());
|
||||
|
||||
Settings targetSettings = Settings.builder().put("index.number_of_shards", 1).build();
|
||||
assertEquals("can't shrink an index with only one shard",
|
||||
expectThrows(IllegalArgumentException.class, () -> MetadataCreateIndexService.validateShrinkIndex(createClusterState("source",
|
||||
1, 0, Settings.builder().put("index.blocks.write", true).build()), "source",
|
||||
Collections.emptySet(), "target", targetSettings)).getMessage());
|
||||
1, 0, Settings.builder().put("index.blocks.write", true).build()), "source", "target", targetSettings)).getMessage());
|
||||
|
||||
assertEquals("the number of target shards [10] must be less that the number of source shards [5]",
|
||||
expectThrows(IllegalArgumentException.class, () -> MetadataCreateIndexService.validateShrinkIndex(createClusterState("source",
|
||||
5, 0, Settings.builder().put("index.blocks.write", true).build()), "source",
|
||||
Collections.emptySet(), "target", Settings.builder().put("index.number_of_shards", 10).build())).getMessage());
|
||||
"target", Settings.builder().put("index.number_of_shards", 10).build())).getMessage());
|
||||
|
||||
|
||||
assertEquals("index source must be read-only to resize index. use \"index.blocks.write=true\"",
|
||||
expectThrows(IllegalStateException.class, () ->
|
||||
MetadataCreateIndexService.validateShrinkIndex(
|
||||
createClusterState("source", randomIntBetween(2, 100), randomIntBetween(0, 10), Settings.EMPTY)
|
||||
, "source", Collections.emptySet(), "target", targetSettings)
|
||||
createClusterState("source", randomIntBetween(2, 100), randomIntBetween(0, 10), Settings.EMPTY),
|
||||
"source", "target", targetSettings)
|
||||
).getMessage());
|
||||
|
||||
assertEquals("index source must have all shards allocated on the same node to shrink index",
|
||||
expectThrows(IllegalStateException.class, () ->
|
||||
MetadataCreateIndexService.validateShrinkIndex(state, "source", Collections.emptySet(), "target", targetSettings)
|
||||
MetadataCreateIndexService.validateShrinkIndex(state, "source", "target", targetSettings)
|
||||
|
||||
).getMessage());
|
||||
assertEquals("the number of source shards [8] must be a multiple of [3]",
|
||||
expectThrows(IllegalArgumentException.class, () ->
|
||||
MetadataCreateIndexService.validateShrinkIndex(createClusterState("source", 8, randomIntBetween(0, 10),
|
||||
Settings.builder().put("index.blocks.write", true).build()), "source", Collections.emptySet(), "target",
|
||||
Settings.builder().put("index.blocks.write", true).build()), "source", "target",
|
||||
Settings.builder().put("index.number_of_shards", 3).build())
|
||||
).getMessage());
|
||||
|
||||
assertEquals("mappings are not allowed when resizing indices, all mappings are copied from the source index",
|
||||
expectThrows(IllegalArgumentException.class, () -> {
|
||||
MetadataCreateIndexService.validateShrinkIndex(state, "source", singleton("foo"),
|
||||
"target", targetSettings);
|
||||
}
|
||||
).getMessage());
|
||||
|
||||
// create one that won't fail
|
||||
ClusterState clusterState = ClusterState.builder(createClusterState("source", numShards, 0,
|
||||
Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().add(newNode("node1")))
|
||||
|
@ -251,7 +243,7 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
|
|||
do {
|
||||
targetShards = randomIntBetween(1, numShards/2);
|
||||
} while (isShrinkable(numShards, targetShards) == false);
|
||||
MetadataCreateIndexService.validateShrinkIndex(clusterState, "source", Collections.emptySet(), "target",
|
||||
MetadataCreateIndexService.validateShrinkIndex(clusterState, "source", "target",
|
||||
Settings.builder().put("index.number_of_shards", targetShards).build());
|
||||
}
|
||||
|
||||
|
@ -263,17 +255,17 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
|
|||
|
||||
assertEquals("index [source] already exists",
|
||||
expectThrows(ResourceAlreadyExistsException.class, () ->
|
||||
MetadataCreateIndexService.validateSplitIndex(state, "target", Collections.emptySet(), "source", targetSettings)
|
||||
MetadataCreateIndexService.validateSplitIndex(state, "target", "source", targetSettings)
|
||||
).getMessage());
|
||||
|
||||
assertEquals("no such index [no_such_index]",
|
||||
expectThrows(IndexNotFoundException.class, () ->
|
||||
MetadataCreateIndexService.validateSplitIndex(state, "no_such_index", Collections.emptySet(), "target", targetSettings)
|
||||
MetadataCreateIndexService.validateSplitIndex(state, "no_such_index", "target", targetSettings)
|
||||
).getMessage());
|
||||
|
||||
assertEquals("the number of source shards [10] must be less that the number of target shards [5]",
|
||||
expectThrows(IllegalArgumentException.class, () -> MetadataCreateIndexService.validateSplitIndex(createClusterState("source",
|
||||
10, 0, Settings.builder().put("index.blocks.write", true).build()), "source", Collections.emptySet(),
|
||||
10, 0, Settings.builder().put("index.blocks.write", true).build()), "source",
|
||||
"target", Settings.builder().put("index.number_of_shards", 5).build())
|
||||
).getMessage());
|
||||
|
||||
|
@ -282,24 +274,17 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
|
|||
expectThrows(IllegalStateException.class, () ->
|
||||
MetadataCreateIndexService.validateSplitIndex(
|
||||
createClusterState("source", randomIntBetween(2, 100), randomIntBetween(0, 10), Settings.EMPTY)
|
||||
, "source", Collections.emptySet(), "target", targetSettings)
|
||||
, "source", "target", targetSettings)
|
||||
).getMessage());
|
||||
|
||||
|
||||
assertEquals("the number of source shards [3] must be a factor of [4]",
|
||||
expectThrows(IllegalArgumentException.class, () ->
|
||||
MetadataCreateIndexService.validateSplitIndex(createClusterState("source", 3, randomIntBetween(0, 10),
|
||||
Settings.builder().put("index.blocks.write", true).build()), "source", Collections.emptySet(), "target",
|
||||
Settings.builder().put("index.blocks.write", true).build()), "source", "target",
|
||||
Settings.builder().put("index.number_of_shards", 4).build())
|
||||
).getMessage());
|
||||
|
||||
assertEquals("mappings are not allowed when resizing indices, all mappings are copied from the source index",
|
||||
expectThrows(IllegalArgumentException.class, () -> {
|
||||
MetadataCreateIndexService.validateSplitIndex(state, "source", singleton("foo"),
|
||||
"target", targetSettings);
|
||||
}
|
||||
).getMessage());
|
||||
|
||||
int targetShards;
|
||||
do {
|
||||
targetShards = randomIntBetween(numShards+1, 100);
|
||||
|
@ -317,7 +302,7 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
|
|||
routingTable = ESAllocationTestCase.startInitializingShardsAndReroute(service, clusterState, "source").routingTable();
|
||||
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
|
||||
|
||||
MetadataCreateIndexService.validateSplitIndex(clusterState, "source", Collections.emptySet(), "target",
|
||||
MetadataCreateIndexService.validateSplitIndex(clusterState, "source", "target",
|
||||
Settings.builder().put("index.number_of_shards", targetShards).build());
|
||||
}
|
||||
|
||||
|
@ -463,7 +448,6 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
|
|||
.collect(Collectors.toSet());
|
||||
MetadataCreateIndexService.prepareResizeIndexSettings(
|
||||
clusterState,
|
||||
Collections.emptySet(),
|
||||
indexSettingsBuilder,
|
||||
clusterState.metadata().index(indexName).getIndex(),
|
||||
"target",
|
||||
|
@ -633,7 +617,7 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
|
|||
.build();
|
||||
request.settings(Settings.builder().put("request_setting", "value2").build());
|
||||
|
||||
Settings aggregatedIndexSettings = aggregateIndexSettings(clusterState, request, templateMetadata.settings(), emptyMap(),
|
||||
Settings aggregatedIndexSettings = aggregateIndexSettings(clusterState, request, templateMetadata.settings(),
|
||||
null, Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService());
|
||||
|
||||
assertThat(aggregatedIndexSettings.get("template_setting"), equalTo("value1"));
|
||||
|
@ -671,7 +655,7 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
|
|||
MetadataIndexTemplateService.resolveAliases(Collections.singletonList(templateMetadata)),
|
||||
Metadata.builder().build(), aliasValidator, xContentRegistry(), queryShardContext);
|
||||
Settings aggregatedIndexSettings = aggregateIndexSettings(ClusterState.EMPTY_STATE, request, templateMetadata.settings(),
|
||||
emptyMap(), null, Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService());
|
||||
null, Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService());
|
||||
|
||||
assertThat(resolvedAliases.get(0).getSearchRouting(), equalTo("fromRequest"));
|
||||
assertThat(aggregatedIndexSettings.get("key1"), equalTo("requestValue"));
|
||||
|
@ -686,14 +670,14 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testDefaultSettings() {
|
||||
Settings aggregatedIndexSettings = aggregateIndexSettings(ClusterState.EMPTY_STATE, request, Settings.EMPTY, emptyMap(),
|
||||
Settings aggregatedIndexSettings = aggregateIndexSettings(ClusterState.EMPTY_STATE, request, Settings.EMPTY,
|
||||
null, Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService());
|
||||
|
||||
assertThat(aggregatedIndexSettings.get(SETTING_NUMBER_OF_SHARDS), equalTo("1"));
|
||||
}
|
||||
|
||||
public void testSettingsFromClusterState() {
|
||||
Settings aggregatedIndexSettings = aggregateIndexSettings(ClusterState.EMPTY_STATE, request, Settings.EMPTY, emptyMap(),
|
||||
Settings aggregatedIndexSettings = aggregateIndexSettings(ClusterState.EMPTY_STATE, request, Settings.EMPTY,
|
||||
null, Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 15).build(), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS,
|
||||
randomShardLimitService());
|
||||
|
||||
|
@ -718,8 +702,8 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
|
|||
.putAlias(AliasMetadata.builder("alias1").searchRouting("1").build())
|
||||
));
|
||||
Settings aggregatedIndexSettings = aggregateIndexSettings(ClusterState.EMPTY_STATE, request,
|
||||
MetadataIndexTemplateService.resolveSettings(templates), emptyMap(),
|
||||
null, Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService());
|
||||
MetadataIndexTemplateService.resolveSettings(templates), null, Settings.EMPTY,
|
||||
IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService());
|
||||
List<AliasMetadata> resolvedAliases = resolveAndValidateAliases(request.index(), request.aliases(),
|
||||
MetadataIndexTemplateService.resolveAliases(templates),
|
||||
Metadata.builder().build(), aliasValidator, xContentRegistry(), queryShardContext);
|
||||
|
@ -745,7 +729,7 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
|
|||
createClusterState("sourceIndex", 1, 0,
|
||||
Settings.builder().put("index.blocks.write", true).build());
|
||||
|
||||
Settings aggregatedIndexSettings = aggregateIndexSettings(clusterState, request, templateMetadata.settings(), emptyMap(),
|
||||
Settings aggregatedIndexSettings = aggregateIndexSettings(clusterState, request, templateMetadata.settings(),
|
||||
clusterState.metadata().index("sourceIndex"), Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS,
|
||||
randomShardLimitService());
|
||||
|
||||
|
@ -933,10 +917,11 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
|
|||
assertThat(targetRoutingNumberOfShards, is(6));
|
||||
}
|
||||
|
||||
|
||||
public void testSoftDeletesDisabledDeprecation() {
|
||||
request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test");
|
||||
request.settings(Settings.builder().put(INDEX_SOFT_DELETES_SETTING.getKey(), false).build());
|
||||
aggregateIndexSettings(ClusterState.EMPTY_STATE, request, Settings.EMPTY, Collections.emptyMap(),
|
||||
aggregateIndexSettings(ClusterState.EMPTY_STATE, request, Settings.EMPTY,
|
||||
null, Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService());
|
||||
assertWarnings("Creating indices with soft-deletes disabled is deprecated and will be removed in future Elasticsearch versions. "
|
||||
+ "Please do not specify value for setting [index.soft_deletes.enabled] of index [test].");
|
||||
|
@ -944,7 +929,7 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
|
|||
if (randomBoolean()) {
|
||||
request.settings(Settings.builder().put(INDEX_SOFT_DELETES_SETTING.getKey(), true).build());
|
||||
}
|
||||
aggregateIndexSettings(ClusterState.EMPTY_STATE, request, Settings.EMPTY, Collections.emptyMap(),
|
||||
aggregateIndexSettings(ClusterState.EMPTY_STATE, request, Settings.EMPTY,
|
||||
null, Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService());
|
||||
}
|
||||
|
||||
|
@ -957,7 +942,7 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
|
|||
settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), between(1, 128) + "mb");
|
||||
}
|
||||
request.settings(settings.build());
|
||||
aggregateIndexSettings(ClusterState.EMPTY_STATE, request, Settings.EMPTY, Collections.emptyMap(),
|
||||
aggregateIndexSettings(ClusterState.EMPTY_STATE, request, Settings.EMPTY,
|
||||
null, Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService());
|
||||
assertWarnings("Translog retention settings [index.translog.retention.age] "
|
||||
+ "and [index.translog.retention.size] are deprecated and effectively ignored. They will be removed in a future version.");
|
||||
|
|
Loading…
Reference in New Issue