Single node rolling restart into a new node can cause metadata loss, closes #1249.
parent da56a4d332
commit be7d3b609f
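In short: a master-eligible node that starts fresh and applies a cluster state in which the metadata did not change never wrote its local gateway state, so a single-node rolling restart onto a new node could lose that metadata. The fix tracks whether metadata has been persisted at least once and forces the first write even when nothing changed. A minimal sketch of that logic follows; only the flag name and the persist condition are taken from the diff below, the surrounding class and method names are hypothetical simplifications:

// Hypothetical sketch of the LocalGateway change below, not the real class.
class MetaDataPersistenceSketch {
    private volatile boolean metaDataPersistedAtLeastOnce = false;

    // Called on every cluster state update.
    void onClusterChanged(boolean localNodeIsMasterEligible, boolean metaDataChanged) {
        // Before the fix the condition was only (masterNode && metaDataChanged),
        // which skips a freshly started node that has never written any local state.
        if (localNodeIsMasterEligible && (metaDataChanged || !metaDataPersistedAtLeastOnce)) {
            persistMetaData();
        }
    }

    private void persistMetaData() {
        // ... write the cluster metadata to the local gateway ...
        metaDataPersistedAtLeastOnce = true;
    }
}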
LocalGateway.java
@@ -95,6 +95,8 @@ public class LocalGateway extends AbstractLifecycleComponent<Gateway> implements
     private volatile boolean initialized = false;
 
+    private volatile boolean metaDataPersistedAtLeastOnce = false;
+
     @Inject public LocalGateway(Settings settings, ClusterService clusterService, NodeEnvironment nodeEnv,
                                 TransportNodesListGatewayMetaState listGatewayMetaState, TransportNodesListGatewayStartedShards listGatewayStartedShards) {
         super(settings);
@@ -193,7 +195,7 @@ public class LocalGateway extends AbstractLifecycleComponent<Gateway> implements
         }
 
         // we only write the local metadata if this is a possible master node
-        if (event.state().nodes().localNode().masterNode() && event.metaDataChanged()) {
+        if (event.state().nodes().localNode().masterNode() && (event.metaDataChanged() || !metaDataPersistedAtLeastOnce)) {
             executor.execute(new LoggingRunnable(logger, new PersistMetaData(event)));
         }
 
@@ -443,6 +445,7 @@ public class LocalGateway extends AbstractLifecycleComponent<Gateway> implements
             } catch (IOException e) {
                 logger.warn("failed to write updated state", e);
             }
+            metaDataPersistedAtLeastOnce = true;
         }
     }
 
SimpleRecoveryLocalGatewayTests.java
@@ -26,6 +26,7 @@ import org.elasticsearch.action.admin.indices.status.IndicesStatusResponse;
 import org.elasticsearch.action.admin.indices.status.ShardStatus;
 import org.elasticsearch.common.settings.ImmutableSettings;
 import org.elasticsearch.gateway.Gateway;
+import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.node.Node;
 import org.elasticsearch.node.internal.InternalNode;
 import org.elasticsearch.test.integration.AbstractNodesTests;
@@ -365,4 +366,31 @@ public class SimpleRecoveryLocalGatewayTests extends AbstractNodesTests {
             }
         }
     }
+
+    @Test public void testRecoveryDifferentNodeOrderStartup() throws Exception {
+        // we need different data paths so we make sure we start the second node fresh
+        buildNode("node1", settingsBuilder().put("gateway.type", "local").put("path.data", "data/data1").build());
+        buildNode("node2", settingsBuilder().put("gateway.type", "local").put("path.data", "data/data2").build());
+        cleanAndCloseNodes();
+
+        startNode("node1", settingsBuilder().put("gateway.type", "local").put("path.data", "data/data1").build());
+
+        client("node1").prepareIndex("test", "type1", "1").setSource("field", "value").execute().actionGet();
+
+        startNode("node2", settingsBuilder().put("gateway.type", "local").put("path.data", "data/data2").build());
+
+        ClusterHealthResponse health = client("node2").admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+        assertThat(health.timedOut(), equalTo(false));
+
+        closeNode("node1");
+        closeNode("node2");
+
+        startNode("node2", settingsBuilder().put("gateway.type", "local").put("path.data", "data/data2").build());
+
+        health = client("node2").admin().cluster().prepareHealth().setWaitForYellowStatus().execute().actionGet();
+        assertThat(health.timedOut(), equalTo(false));
+
+        assertThat(client("node2").admin().indices().prepareExists("test").execute().actionGet().exists(), equalTo(true));
+        assertThat(client("node2").prepareCount("test").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().count(), equalTo(1l));
+    }
 }