Mirror of https://github.com/honeymoose/OpenSearch.git (synced 2025-03-09 14:34:43 +00:00)
Discovery: check index uuid when merging incoming cluster state into local
In big deployments the ClusterState can be large. To keep reusing objects that have already been promoted to the old generation, ZenDiscovery has an optimization where it tries to reuse the existing IndexMetaData objects (containing, among other things, the mappings) from the current cluster state if they didn't change. The comparison currently uses only the index name and the metadata version. However, this is not enough; we should also check the index uuid. In extreme cases, where cluster state processing is slow and the index in question is deleted and recreated and these operations are batch-processed together, we can end up reusing the wrong metadata if the version also happens to be identical. This can happen when the index is created with all metadata predefined and no settings are changed afterwards.

Closes #9489
Closes #9541
This commit is contained in:
parent 6cdde31e64
commit 896e8657ea
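To make the failure mode described above concrete, here is a minimal sketch of the reuse decision this commit tightens. It is not the actual ZenDiscovery code; the class name, uuid values, and version numbers are invented for illustration only (the real change is in the diff below).

// Minimal sketch, not the actual ZenDiscovery code: the reuse decision that
// this commit tightens. Names and values are invented for illustration.
public final class MetaDataReuseSketch {

    // Old rule: reuse the locally cached IndexMetaData whenever the metadata
    // version matches (the index name already matched, because that is how the
    // local entry was looked up).
    static boolean reusableOld(long currentVersion, long incomingVersion) {
        return currentVersion == incomingVersion;
    }

    // New rule: additionally require that both states describe the same
    // physical index, i.e. that the index uuid matches as well.
    static boolean reusableNew(String currentUuid, long currentVersion,
                               String incomingUuid, long incomingVersion) {
        return currentUuid.equals(incomingUuid) && currentVersion == incomingVersion;
    }

    public static void main(String[] args) {
        // Index "test" is deleted and recreated with fully predefined metadata,
        // and both changes are batch-processed in a single cluster state update:
        // same name, same metadata version, but a brand-new uuid.
        System.out.println(reusableOld(1L, 1L));                         // true  -> stale mappings would be reused
        System.out.println(reusableNew("uuid-OLD", 1L, "uuid-NEW", 1L)); // false -> incoming metadata is used
    }
}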
ZenDiscovery.java

@@ -836,10 +836,12 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
                 MetaData.Builder metaDataBuilder = MetaData.builder(updatedState.metaData()).removeAllIndices();
                 for (IndexMetaData indexMetaData : updatedState.metaData()) {
                     IndexMetaData currentIndexMetaData = currentState.metaData().index(indexMetaData.index());
-                    if (currentIndexMetaData == null || currentIndexMetaData.version() != indexMetaData.version()) {
-                        metaDataBuilder.put(indexMetaData, false);
-                    } else {
+                    if (currentIndexMetaData != null && currentIndexMetaData.isSameUUID(indexMetaData.uuid()) &&
+                            currentIndexMetaData.version() == indexMetaData.version()) {
+                        // safe to reuse
                         metaDataBuilder.put(currentIndexMetaData, false);
+                    } else {
+                        metaDataBuilder.put(indexMetaData, false);
                     }
                 }
                 builder.metaData(metaDataBuilder);
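The new condition relies on IndexMetaData.isSameUUID, whose implementation is not part of this diff. The following is a plausible sketch of the check it needs to perform, under the assumption that indices created before index uuids existed carry a "_na_" placeholder that has to be treated as a wildcard; both the constant name and the wildcard behavior are assumptions, not taken from this commit.

// Assumed sketch, not code from this commit: what an isSameUUID-style check
// plausibly has to do. The "_na_" placeholder stands for indices created
// before index uuids existed (assumption).
public final class UuidCheckSketch {

    static final String INDEX_UUID_NA_VALUE = "_na_";

    static boolean isSameUuid(String localUuid, String incomingUuid) {
        // A legacy index without a real uuid cannot be told apart from its
        // successor, so it is treated as matching to keep the optimization
        // applicable to old indices.
        if (INDEX_UUID_NA_VALUE.equals(localUuid) || INDEX_UUID_NA_VALUE.equals(incomingUuid)) {
            return true;
        }
        return localUuid.equals(incomingUuid);
    }

    public static void main(String[] args) {
        System.out.println(isSameUuid("a1b2c3", "a1b2c3")); // true  -> same physical index
        System.out.println(isSameUuid("a1b2c3", "d4e5f6")); // false -> index was deleted and recreated
        System.out.println(isSameUuid("_na_", "d4e5f6"));   // true  -> legacy index, uuid comparison impossible
    }
}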
RareClusterStateTests.java

@@ -23,6 +23,7 @@ import com.google.common.collect.ImmutableMap;
 import org.elasticsearch.cluster.ClusterInfo;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.DiskUsage;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.routing.RoutingNodes;
 import org.elasticsearch.cluster.routing.RoutingTable;
@@ -30,14 +31,25 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
 import org.elasticsearch.common.settings.ImmutableSettings;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.discovery.DiscoveryModule;
+import org.elasticsearch.discovery.DiscoverySettings;
 import org.elasticsearch.gateway.GatewayAllocator;
 import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.disruption.BlockClusterStateProcessing;
+import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.junit.Test;

+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;

 /**
  */
-@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE, numDataNodes = 1, numClientNodes = 0, transportClientRatio = 0)
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0)
 public class RareClusterStateTests extends ElasticsearchIntegrationTest {

     @Override
@@ -52,6 +64,7 @@ public class RareClusterStateTests extends ElasticsearchIntegrationTest {

     @Test
     public void testUnassignedShardAndEmptyNodesInRoutingTable() throws Exception {
+        internalCluster().startNode();
         createIndex("a");
         ensureSearchable("a");
         ClusterState current = clusterService().state();
@@ -66,8 +79,40 @@ public class RareClusterStateTests extends ElasticsearchIntegrationTest {
         );
         ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.<String, DiskUsage>of(), ImmutableMap.<String, Long>of());

         RoutingAllocation routingAllocation = new RoutingAllocation(allocationDeciders, routingNodes, current.nodes(), clusterInfo);
         allocator.allocateUnassigned(routingAllocation);
     }

+    @Test
+    @TestLogging(value = "cluster.service:TRACE")
+    public void testDeleteCreateInOneBulk() throws Exception {
+        internalCluster().startNodesAsync(2, ImmutableSettings.builder()
+                .put(DiscoveryModule.DISCOVERY_TYPE_KEY, "zen")
+                .build()).get();
+        assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());
+        prepareCreate("test").setSettings(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, true).addMapping("type").get();
+        ensureGreen("test");
+
+        // now that the cluster is stable, remove publishing timeout
+        assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(ImmutableSettings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "0")));
+
+        Set<String> nodes = new HashSet<>(Arrays.asList(internalCluster().getNodeNames()));
+        nodes.remove(internalCluster().getMasterName());
+
+        // block none master node.
+        BlockClusterStateProcessing disruption = new BlockClusterStateProcessing(nodes.iterator().next(), getRandom());
+        internalCluster().setDisruptionScheme(disruption);
+        logger.info("--> indexing a doc");
+        index("test", "type", "1");
+        refresh();
+        disruption.startDisrupting();
+        logger.info("--> delete index and recreate it");
+        assertFalse(client().admin().indices().prepareDelete("test").setTimeout("200ms").get().isAcknowledged());
+        assertFalse(prepareCreate("test").setTimeout("200ms").setSettings(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, true).get().isAcknowledged());
+        logger.info("--> letting cluster proceed");
+        disruption.stopDisrupting();
+        ensureGreen(TimeValue.timeValueMinutes(30), "test");
+        assertHitCount(client().prepareSearch("test").get(), 0);
+    }
+
 }
BlockClusterStateProcessing.java (new file)

@@ -0,0 +1,96 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.test.disruption;

import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateNonMasterUpdateTask;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.unit.TimeValue;

import java.util.Random;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

public class BlockClusterStateProcessing extends SingleNodeDisruption {

    AtomicReference<CountDownLatch> disruptionLatch = new AtomicReference<>();

    public BlockClusterStateProcessing(Random random) {
        this(null, random);
    }

    public BlockClusterStateProcessing(String disruptedNode, Random random) {
        super(random);
        this.disruptedNode = disruptedNode;
    }

    @Override
    public void startDisrupting() {
        final String disruptionNodeCopy = disruptedNode;
        if (disruptionNodeCopy == null) {
            return;
        }
        ClusterService clusterService = cluster.getInstance(ClusterService.class, disruptionNodeCopy);
        if (clusterService == null) {
            return;
        }
        logger.info("delaying cluster state updates on node [{}]", disruptionNodeCopy);
        boolean success = disruptionLatch.compareAndSet(null, new CountDownLatch(1));
        assert success : "startDisrupting called without waiting on stopDistrupting to complete";
        final CountDownLatch started = new CountDownLatch(1);
        clusterService.submitStateUpdateTask("service_disruption_block", Priority.IMMEDIATE, new ClusterStateNonMasterUpdateTask() {

            @Override
            public ClusterState execute(ClusterState currentState) throws Exception {
                started.countDown();
                CountDownLatch latch = disruptionLatch.get();
                if (latch != null) {
                    latch.await();
                }
                return currentState;
            }

            @Override
            public void onFailure(String source, Throwable t) {
                logger.error("unexpected error during disruption", t);
            }
        });
        try {
            started.await();
        } catch (InterruptedException e) {
        }
    }

    @Override
    public void stopDisrupting() {
        CountDownLatch latch = disruptionLatch.get();
        if (latch != null) {
            latch.countDown();
        }
    }

    @Override
    public TimeValue expectedTimeToHeal() {
        return TimeValue.timeValueMinutes(0);
    }
}