[ML] Increase assertBusy timeout in ML node failure tests (#52425)
Following the change to store cluster state in Lucene indices (#50907), it can take longer for all the cluster state updates associated with node failure scenarios to be processed during internal cluster tests, where several nodes all run in the same JVM.
This commit is contained in:
parent
20862fe64f
commit
48ccf36db9
|
@@ -451,6 +451,10 @@ public class MlDistributedFailureIT extends BaseMlIntegTestCase {
         // else.
         persistentTasksClusterService.setRecheckInterval(TimeValue.timeValueMillis(200));

+        // The timeout here was increased from 10 seconds to 20 seconds in response to the changes in
+        // https://github.com/elastic/elasticsearch/pull/50907 - now that the cluster state is stored
+        // in a Lucene index it can take a while to update when there are many updates in quick
+        // succession, like we see in internal cluster tests of node failure scenarios
         assertBusy(() -> {
             ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
             PersistentTasksCustomMetaData tasks = clusterState.metaData().custom(PersistentTasksCustomMetaData.TYPE);
@@ -471,7 +475,7 @@ public class MlDistributedFailureIT extends BaseMlIntegTestCase {
                 .getResponse().results().get(0);
             assertEquals(DatafeedState.STARTED, datafeedStats.getDatafeedState());
             assertNotNull(datafeedStats.getNode());
-        });
+        }, 20, TimeUnit.SECONDS);

         long numDocs2 = randomIntBetween(2, 64);
         long now2 = System.currentTimeMillis();
|
Loading…
Reference in New Issue