Fix SourceOnlySnapshotIT (#37461)
The SourceOnlySnapshotIT class tests a source-only repository using the following scenario:
- start a master node
- start a data node
- create a source-only repository
- create an index with documents
- snapshot the index to the source-only repository
- delete the index
- stop the data node
- start a new data node
- restore the index

Thanks to ESIntegTestCase, the index is sometimes created with a custom data path. With such a setting, when a shard is assigned to one of the data nodes of the cluster, the shard path is resolved against the index custom data path and the node's lock id by NodeEnvironment#resolveCustomLocation(). This should work nicely, but in SourceOnlySnapshotIT.snashotAndRestore(), before the change in this PR, the last data node was restarted using a different path.home. At startup this node was assigned a node lock based on the other locks present in the data directory of this temporary path.home, which is empty, so it always got lock id 0. When this new data node was assigned a shard of the index and resolved it against the index custom data path, it also used node lock id 0, which conflicted with another node of the cluster, resulting in various errors, the most obvious being a LockObtainFailedException.

This commit removes the temporary path.home of the last data node so that it uses the same path.home as the other nodes of the cluster and therefore gets assigned a correct node lock id at startup.

Closes #36330
Closes #36276
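The interaction between the shared custom data path and the per-node lock id can be sketched as follows. This is an illustration only, not the actual NodeEnvironment implementation: the helper names, the node.lock file layout, and the path structure are assumptions made for the example. It just shows why two nodes that end up with the same lock ordinal resolve the same shard directory under a shared custom data path.

// Minimal sketch (hypothetical names, not the real NodeEnvironment code) of how a
// shared custom index data path plus a per-node lock ordinal yields a shard path.
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class CustomDataPathLockSketch {

    // A node takes the first free lock ordinal under its own path.home. A node started
    // with a fresh, empty temporary path.home therefore always gets ordinal 0.
    static int acquireNodeLock(Path pathHome) {
        int ordinal = 0;
        while (Files.exists(pathHome.resolve("node.lock." + ordinal))) {
            ordinal++;
        }
        return ordinal;
    }

    // The custom data path is shared by all nodes, so shard directories under it are
    // disambiguated only by the node lock ordinal.
    static Path resolveCustomLocation(Path customDataPath, int nodeLockOrdinal, String index, int shardId) {
        return customDataPath.resolve(Integer.toString(nodeLockOrdinal))
            .resolve("indices")
            .resolve(index)
            .resolve(Integer.toString(shardId));
    }

    public static void main(String[] args) {
        Path customDataPath = Paths.get("/tmp/custom-data-path");

        // A node already in the cluster holds lock ordinal 0 under the shared path.home.
        Path existingNodeShard = resolveCustomLocation(customDataPath, 0, "test-idx", 0);

        // The restarted data node uses its own empty temporary path.home, so it also
        // acquires ordinal 0 and resolves the very same shard directory, which is what
        // produced the LockObtainFailedException in the test.
        int newNodeOrdinal = acquireNodeLock(Paths.get("/tmp/fresh-path-home"));
        Path newNodeShard = resolveCustomLocation(customDataPath, newNodeOrdinal, "test-idx", 0);

        System.out.println(existingNodeShard.equals(newNodeShard)); // true -> conflict
    }
}

Giving the restarted node the same path.home as the rest of the cluster, which is what this commit does, makes it pick the next free lock ordinal instead, so its custom-path shard directory no longer collides with another node's.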
parent a88c050a05
commit e848388865
@@ -28,7 +28,6 @@ import org.elasticsearch.index.MockEngineFactoryPlugin;
 import org.elasticsearch.index.engine.EngineFactory;
 import org.elasticsearch.index.mapper.SeqNoFieldMapper;
 import org.elasticsearch.index.query.QueryBuilders;
-import org.elasticsearch.node.Node;
 import org.elasticsearch.plugins.EnginePlugin;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.plugins.RepositoryPlugin;
@@ -48,13 +47,13 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
-import java.util.concurrent.ExecutionException;
 import java.util.function.BiConsumer;
 
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 
+@ESIntegTestCase.ClusterScope(numDataNodes = 0)
 public class SourceOnlySnapshotIT extends ESIntegTestCase {
 
     @Override
@@ -92,12 +91,6 @@ public class SourceOnlySnapshotIT extends ESIntegTestCase {
         }
     }
 
-    public void testToStopSuiteFailing() {
-        // This is required because otherwise every test in the suite is muted
-        // TODO remove this when one of the other tests is fixed
-    }
-
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/36330")
     public void testSnapshotAndRestore() throws Exception {
         final String sourceIdx = "test-idx";
         boolean requireRouting = randomBoolean();
@@ -128,7 +121,6 @@ public class SourceOnlySnapshotIT extends ESIntegTestCase {
         assertHits(sourceIdx, builders.length, sourceHadDeletions);
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/36276")
     public void testSnapshotAndRestoreWithNested() throws Exception {
         final String sourceIdx = "test-idx";
         boolean requireRouting = randomBoolean();
@@ -215,12 +207,13 @@ public class SourceOnlySnapshotIT extends ESIntegTestCase {
                 client().prepareClearScroll().addScrollId(searchResponse.getScrollId()).get();
             }
         }
 
     }
 
-    private IndexRequestBuilder[] snashotAndRestore(String sourceIdx, int numShards, boolean minimal, boolean requireRouting, boolean
-        useNested)
-        throws ExecutionException, InterruptedException, IOException {
+    private IndexRequestBuilder[] snashotAndRestore(final String sourceIdx,
+                                                    final int numShards,
+                                                    final boolean minimal,
+                                                    final boolean requireRouting,
+                                                    final boolean useNested) throws InterruptedException, IOException {
         logger.info("--> starting a master node and a data node");
         internalCluster().startMasterOnlyNode();
         internalCluster().startDataOnlyNode();
@@ -284,12 +277,8 @@ public class SourceOnlySnapshotIT extends ESIntegTestCase {
         internalCluster().stopRandomDataNode();
         client().admin().cluster().prepareHealth().setTimeout("30s").setWaitForNodes("1");
 
-        logger.info("--> start a new data node");
-        final Settings dataSettings = Settings.builder()
-            .put(Node.NODE_NAME_SETTING.getKey(), randomAlphaOfLength(5))
-            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) // to get a new node id
-            .build();
-        internalCluster().startDataOnlyNode(dataSettings);
+        final String newDataNode = internalCluster().startDataOnlyNode();
+        logger.info("--> start a new data node " + newDataNode);
         client().admin().cluster().prepareHealth().setTimeout("30s").setWaitForNodes("2");
 
         logger.info("--> restore the index and ensure all shards are allocated");
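For context on the scenario described in the commit message, the sketch below shows roughly how an integration test registers a source-only repository and snapshots an index. It is an illustration, not the actual SourceOnlySnapshotIT code: the class, repository, index, and snapshot names are made up, and it assumes the source-only repository plugin is available on the test cluster.

// Sketch of how a test might register a source-only repository and snapshot an index.
// Names are illustrative; the real SourceOnlySnapshotIT uses different names and settings.
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.snapshots.SnapshotState;
import org.elasticsearch.test.ESIntegTestCase;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;

public class SourceOnlyRepositorySketchIT extends ESIntegTestCase {

    public void testSourceOnlySnapshotSketch() {
        // A "source" repository wraps a concrete repository type ("fs" here) via delegate_type.
        assertAcked(client().admin().cluster()
            .preparePutRepository("src-only-repo")
            .setType("source")
            .setSettings(Settings.builder()
                .put("delegate_type", "fs")
                .put("location", randomRepoPath().toString())));

        createIndex("test-idx");
        index("test-idx", "_doc", "1", "field", "value");
        refresh();

        // Source-only snapshots store only _source and index metadata.
        CreateSnapshotResponse response = client().admin().cluster()
            .prepareCreateSnapshot("src-only-repo", "snap-1")
            .setIndices("test-idx")
            .setWaitForCompletion(true)
            .get();
        assertEquals(SnapshotState.SUCCESS, response.getSnapshotInfo().state());
    }
}

The delegate_type setting is what lets the "source" repository write its minimal, _source-only snapshot through an ordinary fs repository, which is the kind of repository the test's snapshot/restore cycle exercises.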