Do not copy initial recovery filter during split (#44053)
If an index is the result of a shrink then it has a value set for `index.routing.allocation.initial_recovery._id`. If this index is subsequently split then this value is copied over, forcing the initial allocation of the split shards to occur on the node on which the shrink took place. Moreover, if that node no longer exists then the split will fail. This commit suppresses the copying of this setting when splitting an index.

Fixes #43955
This commit is contained in:
parent af9b98e81c
commit 3129f5b42e
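The mechanics can be sketched with the Settings API already used in this change: the shrink target records the shrink node under "index.routing.allocation.initial_recovery._id", and a split that copies the source index's settings inherits that pin unless the key is explicitly cleared. Below is a minimal, hedged sketch (not part of the commit), assuming the IndexMetaData and Settings classes from this diff; the class name InitialRecoveryFilterSketch and the node id "node-0" are hypothetical.

import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;

public class InitialRecoveryFilterSketch {
    public static void main(String[] args) {
        // Key built the same way as in MetaDataCreateIndexService:
        // "index.routing.allocation.initial_recovery._id"
        final String initialRecoveryIdFilter =
            IndexMetaData.INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING.getKey() + "_id";

        // Settings of a shrunken index: initial recovery is pinned to the node that
        // performed the shrink ("node-0" is a hypothetical node id).
        final Settings shrunkenSettings = Settings.builder()
            .put(initialRecoveryIdFilter, "node-0")
            .build();

        // Before the fix: a split target that copies the source settings keeps the pin,
        // so its shards cannot allocate if "node-0" has left the cluster.
        final Settings splitBeforeFix = Settings.builder()
            .put(shrunkenSettings)
            .build();

        // After the fix: the split path nulls out the filter, leaving allocation unconstrained.
        final Settings splitAfterFix = Settings.builder()
            .put(shrunkenSettings)
            .putNull(initialRecoveryIdFilter)
            .build();

        System.out.println(splitBeforeFix.get(initialRecoveryIdFilter)); // node-0
        System.out.println(splitAfterFix.get(initialRecoveryIdFilter));  // null
    }
}
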
@@ -761,19 +761,22 @@ public class MetaDataCreateIndexService {
             final ResizeType type,
             final boolean copySettings,
             final IndexScopedSettings indexScopedSettings) {
+
+        // we use "i.r.a.initial_recovery" rather than "i.r.a.require|include" since we want the replica to allocate right away
+        // once we are allocated.
+        final String initialRecoveryIdFilter = IndexMetaData.INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING.getKey() + "_id";
+
         final IndexMetaData sourceMetaData = currentState.metaData().index(resizeSourceIndex.getName());
         if (type == ResizeType.SHRINK) {
             final List<String> nodesToAllocateOn = validateShrinkIndex(currentState, resizeSourceIndex.getName(),
                 mappingKeys, resizeIntoName, indexSettingsBuilder.build());
             indexSettingsBuilder
-                // we use "i.r.a.initial_recovery" rather than "i.r.a.require|include" since we want the replica to allocate right away
-                // once we are allocated.
-                .put(IndexMetaData.INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING.getKey() + "_id",
-                    Strings.arrayToCommaDelimitedString(nodesToAllocateOn.toArray()))
+                .put(initialRecoveryIdFilter, Strings.arrayToCommaDelimitedString(nodesToAllocateOn.toArray()))
                 // we only try once and then give up with a shrink index
                 .put("index.allocation.max_retries", 1);
         } else if (type == ResizeType.SPLIT) {
             validateSplitIndex(currentState, resizeSourceIndex.getName(), mappingKeys, resizeIntoName, indexSettingsBuilder.build());
+            indexSettingsBuilder.putNull(initialRecoveryIdFilter);
         } else {
             throw new IllegalStateException("unknown resize type is " + type);
         }

@@ -32,6 +32,7 @@ import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
 import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
 import org.elasticsearch.action.admin.indices.segments.ShardSegments;
 import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
+import org.elasticsearch.action.admin.indices.shrink.ResizeType;
 import org.elasticsearch.action.admin.indices.stats.CommonStats;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
 import org.elasticsearch.action.admin.indices.stats.ShardStats;
@@ -61,6 +62,7 @@ import org.elasticsearch.index.seqno.SeqNoStats;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.InternalTestCluster;
 import org.elasticsearch.test.VersionUtils;
 
 import java.util.Arrays;
@@ -554,4 +556,42 @@ public class ShrinkIndexIT extends ESIntegTestCase {
                 EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String)null
             )).get();
     }
+
+    public void testShrinkThenSplitWithFailedNode() throws Exception {
+        internalCluster().ensureAtLeastNumDataNodes(3);
+
+        final int shardCount = between(2, 5);
+        prepareCreate("original").setSettings(Settings.builder().put(indexSettings())
+            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, shardCount)).get();
+        client().admin().indices().prepareFlush("original").get();
+        ensureGreen();
+        final String shrinkNode
+            = client().admin().cluster().prepareNodesInfo("data:true").clear().get().getNodes().get(0).getNode().getName();
+        client().admin().indices().prepareUpdateSettings("original")
+            .setSettings(Settings.builder()
+                .put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), shrinkNode)
+                .put(IndexMetaData.SETTING_BLOCKS_WRITE, true)).get();
+        ensureGreen();
+
+        assertAcked(client().admin().indices().prepareResizeIndex("original", "shrunk").setSettings(Settings.builder()
+            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+            .putNull(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey())
+            .build()).setResizeType(ResizeType.SHRINK).get());
+        ensureGreen();
+
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(shrinkNode));
+
+        // demonstrate that the index.routing.allocation.initial_recovery setting from the shrink doesn't carry over into the split index,
+        // because this would cause the split to fail as the initial_recovery node is no longer present.
+
+        logger.info("--> executing split");
+        assertAcked(client().admin().indices().prepareResizeIndex("shrunk", "splitagain").setSettings(Settings.builder()
+            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, shardCount)
+            .putNull(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey())
+            .build()).setResizeType(ResizeType.SPLIT));
+        ensureGreen("splitagain");
+    }
 }