Test: increase the wait for green cluster health calls (elastic/x-pack-elasticsearch#1703)

This commit increases the amount of time to wait for green cluster health during a rolling upgrade,
to account for the extra time that may be needed when shard allocation is delayed. Additionally,
some old explicit timeout values were removed; they had only been set to stay under the client's
default timeout of 30s, which this change raises.
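
For context: when a node drops out during a rolling upgrade, Elasticsearch delays reallocating
its shards for index.unassigned.node_left.delayed_timeout (1m by default), so a wait for green
can legitimately take just over a minute. A minimal sketch of the resulting test pattern,
assuming the usual YAML REST test layout (the layout mirrors the hunks below; this is not
copied from any specific file):

    setup:
      - do:
          cluster.health:
            wait_for_status: green
            wait_for_nodes: 2
            # must outlast the 1m default of index.unassigned.node_left.delayed_timeout
            timeout: 70s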

Relates elastic/x-pack-elasticsearch#1683

Original commit: elastic/x-pack-elasticsearch@9996673db0
Jay Modi authored on 2017-06-14 10:25:40 -06:00; committed by GitHub
parent 1157d3adf9
commit 9c8e12280b
11 changed files with 16 additions and 18 deletions

File 1 of 11

@@ -13,6 +13,7 @@ import org.elasticsearch.Version;
 import org.elasticsearch.common.CheckedFunction;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.test.rest.ESRestTestCase;
 import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
 import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse;
 import org.elasticsearch.xpack.ml.MachineLearningTemplateRegistry;
@@ -86,6 +87,11 @@ public class UpgradeClusterClientYamlTestSuiteIT extends SecurityClusterClientYa
         String token = "Basic " + Base64.getEncoder().encodeToString("elastic:changeme".getBytes(StandardCharsets.UTF_8));
         return Settings.builder()
                 .put(ThreadContext.PREFIX + ".Authorization", token)
+                // increase the client timeouts to 90 seconds to handle long waits for green
+                // cluster health; the waits for green need to be longer than a minute to
+                // account for delayed shards
+                .put(ESRestTestCase.CLIENT_RETRY_TIMEOUT, "90s")
+                .put(ESRestTestCase.CLIENT_SOCKET_TIMEOUT, "90s")
                 .build();
     }
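
For readers unfamiliar with these settings: CLIENT_RETRY_TIMEOUT and CLIENT_SOCKET_TIMEOUT
are read by ESRestTestCase when it builds its low-level RestClient. A rough sketch of the
equivalent builder configuration, assuming the 5.x-era RestClient API (illustrative, not
code from this commit):

    import org.apache.http.HttpHost;
    import org.elasticsearch.client.RestClient;

    public class ClientTimeoutSketch {
        public static void main(String[] args) throws Exception {
            // Retry failed requests for up to 90s in total, and allow a single
            // response to stall for up to 90s before the socket read times out.
            RestClient client = RestClient.builder(new HttpHost("localhost", 9200))
                    .setMaxRetryTimeoutMillis(90_000)                           // CLIENT_RETRY_TIMEOUT = "90s"
                    .setRequestConfigCallback(b -> b.setSocketTimeout(90_000))  // CLIENT_SOCKET_TIMEOUT = "90s"
                    .build();
            client.close();
        }
    }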

File 2 of 11

@@ -7,7 +7,6 @@ setup:
         # replicas, for example monitoring-data-*.
         wait_for_status: yellow
         wait_for_nodes: 2
-        timeout: 25s
 ---
 "Index data and search on the mixed cluster":

File 3 of 11

@@ -6,7 +6,6 @@
       cluster.health:
         wait_for_status: yellow
         wait_for_nodes: 2
-        timeout: 25s
   - match: { timed_out: false }
   - do:

File 4 of 11

@@ -4,7 +4,6 @@ setup:
       cluster.health:
         wait_for_status: yellow
         wait_for_nodes: 2
-        timeout: 25s
   - do:
       update:
@@ -43,7 +42,6 @@ teardown:
       cluster.health:
         wait_for_status: yellow
         wait_for_nodes: 2
-        timeout: 25s
   - match: { timed_out: false }
   - do:

File 5 of 11

@@ -4,7 +4,6 @@ setup:
       cluster.health:
         wait_for_status: yellow
         wait_for_nodes: 2
-        timeout: 25s
 ---
 "Test get old cluster job":

File 6 of 11

@@ -3,7 +3,6 @@ setup:
       cluster.health:
         wait_for_status: yellow
         wait_for_nodes: 2
-        timeout: 25s
 ---
 "Test old cluster datafeed":

File 7 of 11

@@ -1,11 +1,5 @@
 ---
 "Create user and role":
-  - do:
-      cluster.health:
-        wait_for_status: green
-        wait_for_nodes: 2
-        timeout: 25s
-
   - do:
       xpack.security.put_user:
         username: "native_user"
@@ -50,7 +44,6 @@
       cluster.health:
         index: ".security"
         wait_for_active_shards: 2 # 1 primary and 1 replica since we have two nodes
-        timeout: 25s
 ---
 "default password migration":

File 8 of 11

@@ -4,7 +4,8 @@
       cluster.health:
         wait_for_status: green
         wait_for_nodes: 2
-        timeout: 25s
+        # wait long enough for delayed unassigned shards to stop being delayed
+        timeout: 70s
         level: shards
   - do:
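
Aside: level: shards makes the health response include per-index and per-shard detail,
e.g. allowing assertions on individual shard states. A hedged sketch of such an assertion
(the index name test_index is illustrative; the response path follows the cluster health API):

      - do:
          cluster.health:
            level: shards
      - match: { indices.test_index.shards.0.status: green }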

File 9 of 11

@@ -6,7 +6,8 @@
       cluster.health:
         wait_for_status: green
         wait_for_nodes: 2
-        timeout: 25s
+        # wait long enough for delayed unassigned shards to stop being delayed
+        timeout: 70s
   - match: { timed_out: false }
   - do:
@@ -33,7 +34,8 @@
       cluster.health:
         wait_for_status: green
         wait_for_nodes: 2
-        timeout: 25s
+        # wait long enough for delayed unassigned shards to stop being delayed
+        timeout: 70s
   - match: { timed_out: false }
   - do:

File 10 of 11

@@ -3,7 +3,8 @@ setup:
       cluster.health:
         wait_for_status: green
         wait_for_nodes: 2
-        timeout: 25s
+        # wait long enough for delayed unassigned shards to stop being delayed
+        timeout: 70s
 ---
 "Test open old jobs":

File 11 of 11

@@ -3,7 +3,8 @@ setup:
       cluster.health:
         wait_for_status: green
         wait_for_nodes: 2
-        timeout: 25s
+        # wait long enough for delayed unassigned shards to stop being delayed
+        timeout: 70s
   - do:
       indices.create: