Simplify mapping validation for resizing indices. (#58514)
When creating a target index from a source index (shrink, split, or clone), we don't allow target mappings to be specified. This PR simplifies that validation to a single check that the request carries no mappings. The refactor will also help when implementing composable template merging, since we no longer need to resolve and check the target mappings when creating an index from a template.
parent add8ff1ad3
commit 1f2e05c947
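Read against the hunks below, the production change boils down to a single guard: when an index is created from a source index, any mappings on the create request are rejected up front instead of being parsed and carried along. A minimal sketch of the new check, lifted from the hunk around line 549 below (the enclosing method and its other arguments are elided):

    // Sketch of the simplified validation; `request` is the CreateIndexClusterStateUpdateRequest
    // handled by MetadataCreateIndexService (surrounding context omitted).
    if (request.mappings().size() > 0) {
        throw new IllegalArgumentException("mappings are not allowed when creating an index from a source index, " +
            "all mappings are copied from the source index");
    }

With that guard in place, the per-type bookkeeping (targetIndexMappingsTypes, mappingKeys) is removed from the resize validators, as the diff shows.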
@@ -475,7 +475,7 @@ public class MetadataCreateIndexService {
                 xContentRegistry));

         final Settings aggregatedIndexSettings =
-            aggregateIndexSettings(currentState, request, MetadataIndexTemplateService.resolveSettings(templates), mappings,
+            aggregateIndexSettings(currentState, request, MetadataIndexTemplateService.resolveSettings(templates),
                 null, settings, indexScopedSettings, shardLimitValidator);
         int routingNumShards = getIndexNumberOfRoutingShards(aggregatedIndexSettings, null);
         IndexMetadata tmpImd = buildAndValidateTemporaryIndexMetadata(currentState, aggregatedIndexSettings, request, routingNumShards);
@@ -519,7 +519,7 @@ public class MetadataCreateIndexService {
         final Settings aggregatedIndexSettings =
             aggregateIndexSettings(currentState, request,
                 MetadataIndexTemplateService.resolveSettings(currentState.metadata(), templateName),
-                mappings, null, settings, indexScopedSettings, shardLimitValidator);
+                null, settings, indexScopedSettings, shardLimitValidator);
         int routingNumShards = getIndexNumberOfRoutingShards(aggregatedIndexSettings, null);
         IndexMetadata tmpImd = buildAndValidateTemporaryIndexMetadata(currentState, aggregatedIndexSettings, request, routingNumShards);

@@ -549,22 +549,17 @@ public class MetadataCreateIndexService {
             throws Exception {
         logger.info("applying create index request using existing index [{}] metadata", sourceMetadata.getIndex().getName());

-        final Map<String, Map<String, Object>> mappings;
-        if (request.mappings().size() == 0) {
-            mappings = Collections.emptyMap();
-        } else {
-            assert request.mappings().size() == 1 : "expected source metadata mappings to have 1 type but it had: " + request.mappings();
-            String sourceMappings = request.mappings().values().iterator().next();
-            mappings = Collections.singletonMap(MapperService.SINGLE_MAPPING_NAME,
-                Collections.unmodifiableMap(MapperService.parseMapping(xContentRegistry, sourceMappings)));
+        if (request.mappings().size() > 0) {
+            throw new IllegalArgumentException("mappings are not allowed when creating an index from a source index, " +
+                "all mappings are copied from the source index");
         }

-        final Settings aggregatedIndexSettings = aggregateIndexSettings(currentState, request, Settings.EMPTY, mappings, sourceMetadata,
-            settings, indexScopedSettings, shardLimitValidator);
+        final Settings aggregatedIndexSettings = aggregateIndexSettings(currentState, request, Settings.EMPTY,
+            sourceMetadata, settings, indexScopedSettings, shardLimitValidator);
         final int routingNumShards = getIndexNumberOfRoutingShards(aggregatedIndexSettings, sourceMetadata);
         IndexMetadata tmpImd = buildAndValidateTemporaryIndexMetadata(currentState, aggregatedIndexSettings, request, routingNumShards);

-        return applyCreateIndexWithTemporaryService(currentState, request, silent, sourceMetadata, tmpImd, mappings,
+        return applyCreateIndexWithTemporaryService(currentState, request, silent, sourceMetadata, tmpImd, Collections.emptyMap(),
             indexService -> resolveAndValidateAliases(request.index(), request.aliases(), Collections.emptyList(),
                 currentState.metadata(), aliasValidator, xContentRegistry,
                 // the context is only used for validation so it's fine to pass fake values for the
@@ -726,8 +721,7 @@ public class MetadataCreateIndexService {
      * @return the aggregated settings for the new index
      */
     static Settings aggregateIndexSettings(ClusterState currentState, CreateIndexClusterStateUpdateRequest request,
-                                           Settings templateSettings, Map<String, Map<String, Object>> mappings,
-                                           @Nullable IndexMetadata sourceMetadata, Settings settings,
+                                           Settings templateSettings, @Nullable IndexMetadata sourceMetadata, Settings settings,
                                            IndexScopedSettings indexScopedSettings, ShardLimitValidator shardLimitValidator) {
         Settings.Builder indexSettingsBuilder = Settings.builder();
         if (sourceMetadata == null) {
@@ -766,7 +760,6 @@ public class MetadataCreateIndexService {
             assert request.resizeType() != null;
             prepareResizeIndexSettings(
                 currentState,
-                mappings.keySet(),
                 indexSettingsBuilder,
                 request.recoverFrom(),
                 request.index(),
@@ -1077,10 +1070,8 @@ public class MetadataCreateIndexService {
      *
      * @return the list of nodes at least one instance of the source index shards are allocated
      */
-    static List<String> validateShrinkIndex(ClusterState state, String sourceIndex,
-                                            Set<String> targetIndexMappingsTypes, String targetIndexName,
-                                            Settings targetIndexSettings) {
-        IndexMetadata sourceMetadata = validateResize(state, sourceIndex, targetIndexMappingsTypes, targetIndexName, targetIndexSettings);
+    static List<String> validateShrinkIndex(ClusterState state, String sourceIndex, String targetIndexName, Settings targetIndexSettings) {
+        IndexMetadata sourceMetadata = validateResize(state, sourceIndex, targetIndexName, targetIndexSettings);
         assert INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings);
         IndexMetadata.selectShrinkShards(0, sourceMetadata, INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings));

@@ -1110,10 +1101,8 @@ public class MetadataCreateIndexService {
         return nodesToAllocateOn;
     }

-    static void validateSplitIndex(ClusterState state, String sourceIndex,
-                                   Set<String> targetIndexMappingsTypes, String targetIndexName,
-                                   Settings targetIndexSettings) {
-        IndexMetadata sourceMetadata = validateResize(state, sourceIndex, targetIndexMappingsTypes, targetIndexName, targetIndexSettings);
+    static void validateSplitIndex(ClusterState state, String sourceIndex, String targetIndexName, Settings targetIndexSettings) {
+        IndexMetadata sourceMetadata = validateResize(state, sourceIndex, targetIndexName, targetIndexSettings);
         IndexMetadata.selectSplitShard(0, sourceMetadata, IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings));
         if (sourceMetadata.getCreationVersion().before(Version.V_6_0_0_alpha1)) {
             // ensure we have a single type since this would make the splitting code considerably more complex
@@ -1123,16 +1112,12 @@ public class MetadataCreateIndexService {
         }
     }

-    static void validateCloneIndex(ClusterState state, String sourceIndex,
-                                   Set<String> targetIndexMappingsTypes, String targetIndexName,
-                                   Settings targetIndexSettings) {
-        IndexMetadata sourceMetadata = validateResize(state, sourceIndex, targetIndexMappingsTypes, targetIndexName, targetIndexSettings);
+    static void validateCloneIndex(ClusterState state, String sourceIndex, String targetIndexName, Settings targetIndexSettings) {
+        IndexMetadata sourceMetadata = validateResize(state, sourceIndex, targetIndexName, targetIndexSettings);
         IndexMetadata.selectCloneShard(0, sourceMetadata, INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings));
     }

-    static IndexMetadata validateResize(ClusterState state, String sourceIndex,
-                                        Set<String> targetIndexMappingsTypes, String targetIndexName,
-                                        Settings targetIndexSettings) {
+    static IndexMetadata validateResize(ClusterState state, String sourceIndex, String targetIndexName, Settings targetIndexSettings) {
         if (state.metadata().hasIndex(targetIndexName)) {
             throw new ResourceAlreadyExistsException(state.metadata().index(targetIndexName).getIndex());
         }
@@ -1154,12 +1139,6 @@ public class MetadataCreateIndexService {
             throw new IllegalStateException("index " + sourceIndex + " must be read-only to resize index. use \"index.blocks.write=true\"");
         }

-        if ((targetIndexMappingsTypes.size() > 1 ||
-            (targetIndexMappingsTypes.isEmpty() || targetIndexMappingsTypes.contains(MapperService.DEFAULT_MAPPING)) == false)) {
-            throw new IllegalArgumentException("mappings are not allowed when resizing indices" +
-                ", all mappings are copied from the source index");
-        }
-
         if (INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings)) {
             // this method applies all necessary checks ie. if the target shards are less than the source shards
             // of if the source shards are divisible by the number of target shards
@@ -1171,14 +1150,12 @@ public class MetadataCreateIndexService {

     static void prepareResizeIndexSettings(
             final ClusterState currentState,
-            final Set<String> mappingKeys,
             final Settings.Builder indexSettingsBuilder,
             final Index resizeSourceIndex,
             final String resizeIntoName,
             final ResizeType type,
             final boolean copySettings,
             final IndexScopedSettings indexScopedSettings) {

         // we use "i.r.a.initial_recovery" rather than "i.r.a.require|include" since we want the replica to allocate right away
         // once we are allocated.
         final String initialRecoveryIdFilter = IndexMetadata.INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING.getKey() + "_id";
@@ -1186,13 +1163,13 @@ public class MetadataCreateIndexService {
         final IndexMetadata sourceMetadata = currentState.metadata().index(resizeSourceIndex.getName());
         if (type == ResizeType.SHRINK) {
             final List<String> nodesToAllocateOn = validateShrinkIndex(currentState, resizeSourceIndex.getName(),
-                mappingKeys, resizeIntoName, indexSettingsBuilder.build());
+                resizeIntoName, indexSettingsBuilder.build());
             indexSettingsBuilder.put(initialRecoveryIdFilter, Strings.arrayToCommaDelimitedString(nodesToAllocateOn.toArray()));
         } else if (type == ResizeType.SPLIT) {
-            validateSplitIndex(currentState, resizeSourceIndex.getName(), mappingKeys, resizeIntoName, indexSettingsBuilder.build());
+            validateSplitIndex(currentState, resizeSourceIndex.getName(), resizeIntoName, indexSettingsBuilder.build());
             indexSettingsBuilder.putNull(initialRecoveryIdFilter);
         } else if (type == ResizeType.CLONE) {
-            validateCloneIndex(currentState, resizeSourceIndex.getName(), mappingKeys, resizeIntoName, indexSettingsBuilder.build());
+            validateCloneIndex(currentState, resizeSourceIndex.getName(), resizeIntoName, indexSettingsBuilder.build());
             indexSettingsBuilder.putNull(initialRecoveryIdFilter);
         } else {
             throw new IllegalStateException("unknown resize type is " + type);
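For callers of the resize validators, the only visible change is the narrowed signatures: the Set of target mapping types disappears. A sketch of the new call shape, with placeholder index names and settings, mirroring how the updated tests below invoke these methods:

    // Placeholder values; `state` is a ClusterState and `targetSettings` the target index settings.
    List<String> nodesToAllocateOn = MetadataCreateIndexService.validateShrinkIndex(state, "source", "target", targetSettings);
    MetadataCreateIndexService.validateSplitIndex(state, "source", "target", targetSettings);
    MetadataCreateIndexService.validateCloneIndex(state, "source", "target", targetSettings);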
@@ -188,52 +188,44 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {

         assertEquals("index [source] already exists",
             expectThrows(ResourceAlreadyExistsException.class, () ->
-                MetadataCreateIndexService.validateShrinkIndex(state, "target", Collections.emptySet(), "source", Settings.EMPTY)
+                MetadataCreateIndexService.validateShrinkIndex(state, "target", "source", Settings.EMPTY)
             ).getMessage());

         assertEquals("no such index [no_such_index]",
             expectThrows(IndexNotFoundException.class, () ->
-                MetadataCreateIndexService.validateShrinkIndex(state, "no_such_index", Collections.emptySet(), "target", Settings.EMPTY)
+                MetadataCreateIndexService.validateShrinkIndex(state, "no_such_index", "target", Settings.EMPTY)
             ).getMessage());

         Settings targetSettings = Settings.builder().put("index.number_of_shards", 1).build();
         assertEquals("can't shrink an index with only one shard",
             expectThrows(IllegalArgumentException.class, () -> MetadataCreateIndexService.validateShrinkIndex(createClusterState("source",
-                1, 0, Settings.builder().put("index.blocks.write", true).build()), "source",
-                Collections.emptySet(), "target", targetSettings)).getMessage());
+                1, 0, Settings.builder().put("index.blocks.write", true).build()), "source", "target", targetSettings)).getMessage());

         assertEquals("the number of target shards [10] must be less that the number of source shards [5]",
             expectThrows(IllegalArgumentException.class, () -> MetadataCreateIndexService.validateShrinkIndex(createClusterState("source",
                 5, 0, Settings.builder().put("index.blocks.write", true).build()), "source",
-                Collections.emptySet(), "target", Settings.builder().put("index.number_of_shards", 10).build())).getMessage());
+                "target", Settings.builder().put("index.number_of_shards", 10).build())).getMessage());


         assertEquals("index source must be read-only to resize index. use \"index.blocks.write=true\"",
             expectThrows(IllegalStateException.class, () ->
                 MetadataCreateIndexService.validateShrinkIndex(
-                    createClusterState("source", randomIntBetween(2, 100), randomIntBetween(0, 10), Settings.EMPTY)
-                    , "source", Collections.emptySet(), "target", targetSettings)
+                    createClusterState("source", randomIntBetween(2, 100), randomIntBetween(0, 10), Settings.EMPTY),
+                    "source", "target", targetSettings)
             ).getMessage());

         assertEquals("index source must have all shards allocated on the same node to shrink index",
             expectThrows(IllegalStateException.class, () ->
-                MetadataCreateIndexService.validateShrinkIndex(state, "source", Collections.emptySet(), "target", targetSettings)
+                MetadataCreateIndexService.validateShrinkIndex(state, "source", "target", targetSettings)

             ).getMessage());
         assertEquals("the number of source shards [8] must be a multiple of [3]",
             expectThrows(IllegalArgumentException.class, () ->
                 MetadataCreateIndexService.validateShrinkIndex(createClusterState("source", 8, randomIntBetween(0, 10),
-                    Settings.builder().put("index.blocks.write", true).build()), "source", Collections.emptySet(), "target",
+                    Settings.builder().put("index.blocks.write", true).build()), "source", "target",
                     Settings.builder().put("index.number_of_shards", 3).build())
             ).getMessage());

-        assertEquals("mappings are not allowed when resizing indices, all mappings are copied from the source index",
-            expectThrows(IllegalArgumentException.class, () -> {
-                MetadataCreateIndexService.validateShrinkIndex(state, "source", singleton("foo"),
-                    "target", targetSettings);
-                }
-            ).getMessage());
-
         // create one that won't fail
         ClusterState clusterState = ClusterState.builder(createClusterState("source", numShards, 0,
             Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().add(newNode("node1")))
@@ -251,7 +243,7 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
         do {
             targetShards = randomIntBetween(1, numShards/2);
         } while (isShrinkable(numShards, targetShards) == false);
-        MetadataCreateIndexService.validateShrinkIndex(clusterState, "source", Collections.emptySet(), "target",
+        MetadataCreateIndexService.validateShrinkIndex(clusterState, "source", "target",
             Settings.builder().put("index.number_of_shards", targetShards).build());
     }

@@ -263,17 +255,17 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {

         assertEquals("index [source] already exists",
             expectThrows(ResourceAlreadyExistsException.class, () ->
-                MetadataCreateIndexService.validateSplitIndex(state, "target", Collections.emptySet(), "source", targetSettings)
+                MetadataCreateIndexService.validateSplitIndex(state, "target", "source", targetSettings)
             ).getMessage());

         assertEquals("no such index [no_such_index]",
             expectThrows(IndexNotFoundException.class, () ->
-                MetadataCreateIndexService.validateSplitIndex(state, "no_such_index", Collections.emptySet(), "target", targetSettings)
+                MetadataCreateIndexService.validateSplitIndex(state, "no_such_index", "target", targetSettings)
             ).getMessage());

         assertEquals("the number of source shards [10] must be less that the number of target shards [5]",
             expectThrows(IllegalArgumentException.class, () -> MetadataCreateIndexService.validateSplitIndex(createClusterState("source",
-                10, 0, Settings.builder().put("index.blocks.write", true).build()), "source", Collections.emptySet(),
+                10, 0, Settings.builder().put("index.blocks.write", true).build()), "source",
                 "target", Settings.builder().put("index.number_of_shards", 5).build())
             ).getMessage());

@@ -282,24 +274,17 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
             expectThrows(IllegalStateException.class, () ->
                 MetadataCreateIndexService.validateSplitIndex(
                     createClusterState("source", randomIntBetween(2, 100), randomIntBetween(0, 10), Settings.EMPTY)
-                    , "source", Collections.emptySet(), "target", targetSettings)
+                    , "source", "target", targetSettings)
             ).getMessage());


         assertEquals("the number of source shards [3] must be a factor of [4]",
             expectThrows(IllegalArgumentException.class, () ->
                 MetadataCreateIndexService.validateSplitIndex(createClusterState("source", 3, randomIntBetween(0, 10),
-                    Settings.builder().put("index.blocks.write", true).build()), "source", Collections.emptySet(), "target",
+                    Settings.builder().put("index.blocks.write", true).build()), "source", "target",
                     Settings.builder().put("index.number_of_shards", 4).build())
             ).getMessage());

-        assertEquals("mappings are not allowed when resizing indices, all mappings are copied from the source index",
-            expectThrows(IllegalArgumentException.class, () -> {
-                MetadataCreateIndexService.validateSplitIndex(state, "source", singleton("foo"),
-                    "target", targetSettings);
-                }
-            ).getMessage());
-
         int targetShards;
         do {
             targetShards = randomIntBetween(numShards+1, 100);
@@ -317,7 +302,7 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
         routingTable = ESAllocationTestCase.startInitializingShardsAndReroute(service, clusterState, "source").routingTable();
         clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

-        MetadataCreateIndexService.validateSplitIndex(clusterState, "source", Collections.emptySet(), "target",
+        MetadataCreateIndexService.validateSplitIndex(clusterState, "source", "target",
             Settings.builder().put("index.number_of_shards", targetShards).build());
     }

@@ -463,7 +448,6 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
             .collect(Collectors.toSet());
         MetadataCreateIndexService.prepareResizeIndexSettings(
             clusterState,
-            Collections.emptySet(),
             indexSettingsBuilder,
             clusterState.metadata().index(indexName).getIndex(),
             "target",
@@ -633,7 +617,7 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
             .build();
         request.settings(Settings.builder().put("request_setting", "value2").build());

-        Settings aggregatedIndexSettings = aggregateIndexSettings(clusterState, request, templateMetadata.settings(), emptyMap(),
+        Settings aggregatedIndexSettings = aggregateIndexSettings(clusterState, request, templateMetadata.settings(),
             null, Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService());

         assertThat(aggregatedIndexSettings.get("template_setting"), equalTo("value1"));
@@ -671,7 +655,7 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
             MetadataIndexTemplateService.resolveAliases(Collections.singletonList(templateMetadata)),
             Metadata.builder().build(), aliasValidator, xContentRegistry(), queryShardContext);
         Settings aggregatedIndexSettings = aggregateIndexSettings(ClusterState.EMPTY_STATE, request, templateMetadata.settings(),
-            emptyMap(), null, Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService());
+            null, Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService());

         assertThat(resolvedAliases.get(0).getSearchRouting(), equalTo("fromRequest"));
         assertThat(aggregatedIndexSettings.get("key1"), equalTo("requestValue"));
@@ -686,14 +670,14 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
     }

     public void testDefaultSettings() {
-        Settings aggregatedIndexSettings = aggregateIndexSettings(ClusterState.EMPTY_STATE, request, Settings.EMPTY, emptyMap(),
+        Settings aggregatedIndexSettings = aggregateIndexSettings(ClusterState.EMPTY_STATE, request, Settings.EMPTY,
             null, Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService());

         assertThat(aggregatedIndexSettings.get(SETTING_NUMBER_OF_SHARDS), equalTo("1"));
     }

     public void testSettingsFromClusterState() {
-        Settings aggregatedIndexSettings = aggregateIndexSettings(ClusterState.EMPTY_STATE, request, Settings.EMPTY, emptyMap(),
+        Settings aggregatedIndexSettings = aggregateIndexSettings(ClusterState.EMPTY_STATE, request, Settings.EMPTY,
             null, Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 15).build(), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS,
             randomShardLimitService());

@@ -718,8 +702,8 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
                 .putAlias(AliasMetadata.builder("alias1").searchRouting("1").build())
         ));
         Settings aggregatedIndexSettings = aggregateIndexSettings(ClusterState.EMPTY_STATE, request,
-            MetadataIndexTemplateService.resolveSettings(templates), emptyMap(),
-            null, Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService());
+            MetadataIndexTemplateService.resolveSettings(templates), null, Settings.EMPTY,
+            IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService());
         List<AliasMetadata> resolvedAliases = resolveAndValidateAliases(request.index(), request.aliases(),
             MetadataIndexTemplateService.resolveAliases(templates),
             Metadata.builder().build(), aliasValidator, xContentRegistry(), queryShardContext);
@@ -745,7 +729,7 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
             createClusterState("sourceIndex", 1, 0,
                 Settings.builder().put("index.blocks.write", true).build());

-        Settings aggregatedIndexSettings = aggregateIndexSettings(clusterState, request, templateMetadata.settings(), emptyMap(),
+        Settings aggregatedIndexSettings = aggregateIndexSettings(clusterState, request, templateMetadata.settings(),
             clusterState.metadata().index("sourceIndex"), Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS,
             randomShardLimitService());

@@ -933,10 +917,11 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
         assertThat(targetRoutingNumberOfShards, is(6));
     }

+
     public void testSoftDeletesDisabledDeprecation() {
         request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test");
         request.settings(Settings.builder().put(INDEX_SOFT_DELETES_SETTING.getKey(), false).build());
-        aggregateIndexSettings(ClusterState.EMPTY_STATE, request, Settings.EMPTY, Collections.emptyMap(),
+        aggregateIndexSettings(ClusterState.EMPTY_STATE, request, Settings.EMPTY,
             null, Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService());
         assertWarnings("Creating indices with soft-deletes disabled is deprecated and will be removed in future Elasticsearch versions. "
             + "Please do not specify value for setting [index.soft_deletes.enabled] of index [test].");
@@ -944,7 +929,7 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
         if (randomBoolean()) {
             request.settings(Settings.builder().put(INDEX_SOFT_DELETES_SETTING.getKey(), true).build());
         }
-        aggregateIndexSettings(ClusterState.EMPTY_STATE, request, Settings.EMPTY, Collections.emptyMap(),
+        aggregateIndexSettings(ClusterState.EMPTY_STATE, request, Settings.EMPTY,
             null, Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService());
     }

@@ -957,7 +942,7 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
             settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), between(1, 128) + "mb");
         }
         request.settings(settings.build());
-        aggregateIndexSettings(ClusterState.EMPTY_STATE, request, Settings.EMPTY, Collections.emptyMap(),
+        aggregateIndexSettings(ClusterState.EMPTY_STATE, request, Settings.EMPTY,
             null, Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService());
         assertWarnings("Translog retention settings [index.translog.retention.age] "
             + "and [index.translog.retention.size] are deprecated and effectively ignored. They will be removed in a future version.");