Mirror of https://github.com/honeymoose/OpenSearch.git, synced 2025-02-25 06:16:40 +00:00
improve tests execution time
This commit is contained in:
parent 306b3939cf
commit f9ae132c72
RecoveryTarget.java

@@ -19,6 +19,7 @@
 
 package org.elasticsearch.index.shard.recovery;
 
+import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.IndexOutput;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.common.Nullable;
@@ -250,6 +251,11 @@ public class RecoveryTarget extends AbstractComponent {
                return;
            }
 
+            if (cause instanceof AlreadyClosedException) {
+                listener.onIgnoreRecovery(true, "source shard is closed (" + request.sourceNode() + ")");
+                return;
+            }
+
            logger.trace("[{}][{}] recovery from [{}] failed", e, request.shardId().index().name(), request.shardId().id(), request.sourceNode());
            listener.onRecoveryFailure(new RecoveryFailedException(request, e), true);
        }
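The RecoveryTarget hunks above make a closed recovery source a non-failure: when the exception's cause is an AlreadyClosedException, the recovery is ignored with an explanatory message instead of being reported as failed and retried. A minimal, self-contained sketch of the added check follows; the class and method names are illustrative, and deriving `cause` via ExceptionsHelper.unwrapCause is an assumption suggested by the existing import, since the surrounding method body is not part of the diff.

// Sketch only: illustrative names, not the actual RecoveryTarget code.
import org.apache.lucene.store.AlreadyClosedException;
import org.elasticsearch.ExceptionsHelper;

final class RecoveryFailureSketch {
    // True when a recovery failure boils down to the source shard having been
    // closed, in which case the recovery should be ignored rather than retried.
    static boolean isSourceShardClosed(Throwable failure) {
        Throwable cause = ExceptionsHelper.unwrapCause(failure); // assumed unwrapping step
        return cause instanceof AlreadyClosedException;
    }
}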
MinimumMasterNodesTests.java

@@ -66,8 +66,8 @@ public class MinimumMasterNodesTests extends AbstractZenNodesTests {
 
        Settings settings = settingsBuilder()
                .put("discovery.zen.minimum_master_nodes", 2)
-                .put("discovery.zen.ping_timeout", "500ms")
-                .put("discovery.initial_state_timeout", "1s")
+                .put("discovery.zen.ping_timeout", "200ms")
+                .put("discovery.initial_state_timeout", "500ms")
                .put("gateway.type", "local")
                .put("index.number_of_shards", 1)
                .build();
@@ -82,7 +82,8 @@ public class MinimumMasterNodesTests extends AbstractZenNodesTests {
        logger.info("--> start second node, cluster should be formed");
        startNode("node2", settings);
 
-        Thread.sleep(3000);
+        ClusterHealthResponse clusterHealthResponse = client("node1").admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet();
+        assertThat(clusterHealthResponse.timedOut(), equalTo(false));
 
        state = client("node1").admin().cluster().prepareState().setLocal(true).execute().actionGet().state();
        assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(false));
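The hunk above, together with the shortened discovery.zen.ping_timeout and discovery.initial_state_timeout in the settings hunk before it, shows the pattern the rest of this commit repeats: replace a fixed Thread.sleep() with a cluster health request that blocks until the expected number of nodes has joined, then assert that the wait did not time out. A minimal sketch of that pattern is below, assuming an org.elasticsearch.client.Client supplied by the test harness; the class and method names are illustrative, since the original test calls the client API inline.

// Sketch only: WaitForNodesSketch and waitForNodes are illustrative names.
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.client.Client;

final class WaitForNodesSketch {
    // Blocks until the cluster reports the expected node count, or until the
    // health request's default timeout elapses, instead of sleeping a fixed interval.
    static void waitForNodes(Client client, int expectedNodes) {
        ClusterHealthResponse health = client.admin().cluster()
                .prepareHealth()
                .setWaitForNodes(Integer.toString(expectedNodes))
                .execute().actionGet();
        if (health.timedOut()) {
            throw new AssertionError("cluster did not reach " + expectedNodes + " nodes in time");
        }
    }
}

The health request returns as soon as the node has actually joined, so the common fast path no longer pays the full one-to-three-second sleep, and a slow cluster now fails the assertion loudly instead of letting the test proceed on a half-formed cluster.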
@@ -120,7 +121,8 @@ public class MinimumMasterNodesTests extends AbstractZenNodesTests {
        logger.info("--> starting the previous master node again...");
        startNode(masterNodeName, settings);
 
-        Thread.sleep(3000);
+        clusterHealthResponse = client("node1").admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet();
+        assertThat(clusterHealthResponse.timedOut(), equalTo(false));
 
        state = client("node1").admin().cluster().prepareState().setLocal(true).execute().actionGet().state();
        assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(false));
@@ -155,7 +157,8 @@ public class MinimumMasterNodesTests extends AbstractZenNodesTests {
        logger.info("--> starting the previous master node again...");
        startNode(nonMasterNodeName, settings);
 
-        Thread.sleep(3000);
+        clusterHealthResponse = client("node1").admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet();
+        assertThat(clusterHealthResponse.timedOut(), equalTo(false));
 
        state = client("node1").admin().cluster().prepareState().setLocal(true).execute().actionGet().state();
        assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(false));
@@ -189,8 +192,8 @@ public class MinimumMasterNodesTests extends AbstractZenNodesTests {
 
        Settings settings = settingsBuilder()
                .put("discovery.zen.minimum_master_nodes", 3)
-                .put("discovery.zen.ping_timeout", "500ms")
-                .put("discovery.initial_state_timeout", "1s")
+                .put("discovery.zen.ping_timeout", "200ms")
+                .put("discovery.initial_state_timeout", "500ms")
                .put("gateway.type", "local")
                .build();
 
@@ -199,6 +202,7 @@ public class MinimumMasterNodesTests extends AbstractZenNodesTests {
        startNode("node2", settings);
 
        Thread.sleep(500);
+
        ClusterState state = client("node1").admin().cluster().prepareState().setLocal(true).execute().actionGet().state();
        assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(true));
        state = client("node2").admin().cluster().prepareState().setLocal(true).execute().actionGet().state();
@@ -208,7 +212,8 @@ public class MinimumMasterNodesTests extends AbstractZenNodesTests {
        startNode("node3", settings);
        startNode("node4", settings);
 
-        Thread.sleep(1000);
+        ClusterHealthResponse clusterHealthResponse = client("node1").admin().cluster().prepareHealth().setWaitForNodes("4").execute().actionGet();
+        assertThat(clusterHealthResponse.timedOut(), equalTo(false));
 
        state = client("node1").admin().cluster().prepareState().execute().actionGet().state();
        assertThat(state.nodes().size(), equalTo(4));
@@ -241,7 +246,7 @@ public class MinimumMasterNodesTests extends AbstractZenNodesTests {
            closeNode(nodeToShutdown);
        }
 
-        Thread.sleep(3000);
+        Thread.sleep(500);
 
        String lastNonMasterNodeUp = nonMasterNodes.removeLast();
        logger.info("--> verify that there is no master anymore on remaining nodes");
@@ -255,7 +260,8 @@ public class MinimumMasterNodesTests extends AbstractZenNodesTests {
            startNode(nodeToShutdown, settings);
        }
 
-        Thread.sleep(1000);
+        clusterHealthResponse = client("node1").admin().cluster().prepareHealth().setWaitForNodes("4").execute().actionGet();
+        assertThat(clusterHealthResponse.timedOut(), equalTo(false));
 
        logger.info("Running Cluster Health");
        ClusterHealthResponse clusterHealth = client("node1").admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
WriteConsistencyLevelTests.java

@@ -55,7 +55,7 @@ public class WriteConsistencyLevelTests extends AbstractNodesTests {
        try {
            client("node1").prepareIndex("test", "type1", "1").setSource(source("1", "test"))
                    .setConsistencyLevel(WriteConsistencyLevel.QUORUM)
-                    .setTimeout(timeValueSeconds(1)).execute().actionGet();
+                    .setTimeout(timeValueMillis(100)).execute().actionGet();
            assert false : "can't index, does not match consistency";
        } catch (UnavailableShardsException e) {
            // all is well
@@ -76,7 +76,7 @@ public class WriteConsistencyLevelTests extends AbstractNodesTests {
        try {
            client("node1").prepareIndex("test", "type1", "1").setSource(source("1", "test"))
                    .setConsistencyLevel(WriteConsistencyLevel.ALL)
-                    .setTimeout(timeValueSeconds(1)).execute().actionGet();
+                    .setTimeout(timeValueMillis(100)).execute().actionGet();
            assert false : "can't index, does not match consistency";
        } catch (UnavailableShardsException e) {
            // all is well
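In both WriteConsistencyLevelTests hunks the index request is expected to fail: with too few replicas started, QUORUM (and later ALL) write consistency cannot be satisfied, so the call returns only when its timeout expires and then throws UnavailableShardsException. Cutting that timeout from one second to 100 ms therefore shortens the test without changing what it verifies. A minimal sketch of the pattern, with an illustrative class name and an inline JSON source standing in for the test's own source() helper:

// Sketch only: ConsistencyTimeoutSketch is an illustrative name, not test code.
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;

import org.elasticsearch.action.UnavailableShardsException;
import org.elasticsearch.action.WriteConsistencyLevel;
import org.elasticsearch.client.Client;

final class ConsistencyTimeoutSketch {
    // Indexing with QUORUM consistency while replicas are unassigned is expected
    // to fail fast once the (short) timeout expires.
    static void expectConsistencyFailure(Client client) {
        try {
            client.prepareIndex("test", "type1", "1")
                    .setSource("{\"id\":\"1\",\"name\":\"test\"}")
                    .setConsistencyLevel(WriteConsistencyLevel.QUORUM)
                    .setTimeout(timeValueMillis(100)) // 100ms instead of 1s
                    .execute().actionGet();
            throw new AssertionError("indexing should not have satisfied QUORUM");
        } catch (UnavailableShardsException e) {
            // expected: not enough shard copies were available within the timeout
        }
    }
}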