From f9ae132c720c18dcb968bf2df30d27d93ccd0cdf Mon Sep 17 00:00:00 2001
From: Shay Banon
Date: Wed, 24 Aug 2011 22:00:27 +0300
Subject: [PATCH] improve tests execution time

---
 .../index/shard/recovery/RecoveryTarget.java |  6 +++++
 .../cluster/MinimumMasterNodesTests.java     | 26 ++++++++++++-------
 .../WriteConsistencyLevelTests.java          |  4 +--
 3 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/recovery/RecoveryTarget.java b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/recovery/RecoveryTarget.java
index d7438883692..0385fe9d996 100644
--- a/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/recovery/RecoveryTarget.java
+++ b/modules/elasticsearch/src/main/java/org/elasticsearch/index/shard/recovery/RecoveryTarget.java
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.index.shard.recovery;
 
+import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.IndexOutput;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.common.Nullable;
@@ -250,6 +251,11 @@ public class RecoveryTarget extends AbstractComponent {
             return;
         }
 
+        if (cause instanceof AlreadyClosedException) {
+            listener.onIgnoreRecovery(true, "source shard is closed (" + request.sourceNode() + ")");
+            return;
+        }
+
         logger.trace("[{}][{}] recovery from [{}] failed", e, request.shardId().index().name(), request.shardId().id(), request.sourceNode());
         listener.onRecoveryFailure(new RecoveryFailedException(request, e), true);
     }
diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/cluster/MinimumMasterNodesTests.java b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/cluster/MinimumMasterNodesTests.java
index 5e356827262..105faf8856c 100644
--- a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/cluster/MinimumMasterNodesTests.java
+++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/cluster/MinimumMasterNodesTests.java
@@ -66,8 +66,8 @@ public class MinimumMasterNodesTests extends AbstractZenNodesTests {
 
         Settings settings = settingsBuilder()
                 .put("discovery.zen.minimum_master_nodes", 2)
-                .put("discovery.zen.ping_timeout", "500ms")
-                .put("discovery.initial_state_timeout", "1s")
+                .put("discovery.zen.ping_timeout", "200ms")
+                .put("discovery.initial_state_timeout", "500ms")
                 .put("gateway.type", "local")
                 .put("index.number_of_shards", 1)
                 .build();
@@ -82,7 +82,8 @@ public class MinimumMasterNodesTests extends AbstractZenNodesTests {
 
         logger.info("--> start second node, cluster should be formed");
         startNode("node2", settings);
-        Thread.sleep(3000);
+        ClusterHealthResponse clusterHealthResponse = client("node1").admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet();
+        assertThat(clusterHealthResponse.timedOut(), equalTo(false));
 
         state = client("node1").admin().cluster().prepareState().setLocal(true).execute().actionGet().state();
         assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(false));
@@ -120,7 +121,8 @@ public class MinimumMasterNodesTests extends AbstractZenNodesTests {
 
         logger.info("--> starting the previous master node again...");
         startNode(masterNodeName, settings);
-        Thread.sleep(3000);
+        clusterHealthResponse = client("node1").admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet();
+        assertThat(clusterHealthResponse.timedOut(), equalTo(false));
 
         state = client("node1").admin().cluster().prepareState().setLocal(true).execute().actionGet().state();
         assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(false));
@@ -155,7 +157,8 @@ public class MinimumMasterNodesTests extends AbstractZenNodesTests {
 
         logger.info("--> starting the previous master node again...");
         startNode(nonMasterNodeName, settings);
-        Thread.sleep(3000);
+        clusterHealthResponse = client("node1").admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet();
+        assertThat(clusterHealthResponse.timedOut(), equalTo(false));
 
         state = client("node1").admin().cluster().prepareState().setLocal(true).execute().actionGet().state();
         assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(false));
@@ -189,8 +192,8 @@ public class MinimumMasterNodesTests extends AbstractZenNodesTests {
 
         Settings settings = settingsBuilder()
                 .put("discovery.zen.minimum_master_nodes", 3)
-                .put("discovery.zen.ping_timeout", "500ms")
-                .put("discovery.initial_state_timeout", "1s")
+                .put("discovery.zen.ping_timeout", "200ms")
+                .put("discovery.initial_state_timeout", "500ms")
                 .put("gateway.type", "local")
                 .build();
 
@@ -199,6 +202,7 @@ public class MinimumMasterNodesTests extends AbstractZenNodesTests {
 
         startNode("node2", settings);
         Thread.sleep(500);
+
         ClusterState state = client("node1").admin().cluster().prepareState().setLocal(true).execute().actionGet().state();
         assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(true));
         state = client("node2").admin().cluster().prepareState().setLocal(true).execute().actionGet().state();
@@ -208,7 +212,8 @@ public class MinimumMasterNodesTests extends AbstractZenNodesTests {
 
         startNode("node3", settings);
         startNode("node4", settings);
-        Thread.sleep(1000);
+        ClusterHealthResponse clusterHealthResponse = client("node1").admin().cluster().prepareHealth().setWaitForNodes("4").execute().actionGet();
+        assertThat(clusterHealthResponse.timedOut(), equalTo(false));
 
         state = client("node1").admin().cluster().prepareState().execute().actionGet().state();
         assertThat(state.nodes().size(), equalTo(4));
@@ -241,7 +246,7 @@ public class MinimumMasterNodesTests extends AbstractZenNodesTests {
             closeNode(nodeToShutdown);
         }
 
-        Thread.sleep(3000);
+        Thread.sleep(500);
 
         String lastNonMasterNodeUp = nonMasterNodes.removeLast();
         logger.info("--> verify that there is no master anymore on remaining nodes");
@@ -255,7 +260,8 @@ public class MinimumMasterNodesTests extends AbstractZenNodesTests {
             startNode(nodeToShutdown, settings);
         }
 
-        Thread.sleep(1000);
+        clusterHealthResponse = client("node1").admin().cluster().prepareHealth().setWaitForNodes("4").execute().actionGet();
+        assertThat(clusterHealthResponse.timedOut(), equalTo(false));
 
         logger.info("Running Cluster Health");
         ClusterHealthResponse clusterHealth = client("node1").admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
diff --git a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/consistencylevel/WriteConsistencyLevelTests.java b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/consistencylevel/WriteConsistencyLevelTests.java
index aecb7a8c28e..3bd9a6f2a2d 100644
--- a/modules/test/integration/src/test/java/org/elasticsearch/test/integration/consistencylevel/WriteConsistencyLevelTests.java
+++ b/modules/test/integration/src/test/java/org/elasticsearch/test/integration/consistencylevel/WriteConsistencyLevelTests.java
@@ -55,7 +55,7 @@ public class WriteConsistencyLevelTests extends AbstractNodesTests {
         try {
             client("node1").prepareIndex("test", "type1", "1").setSource(source("1", "test"))
                     .setConsistencyLevel(WriteConsistencyLevel.QUORUM)
-                    .setTimeout(timeValueSeconds(1)).execute().actionGet();
+                    .setTimeout(timeValueMillis(100)).execute().actionGet();
             assert false : "can't index, does not match consistency";
         } catch (UnavailableShardsException e) {
             // all is well
@@ -76,7 +76,7 @@ public class WriteConsistencyLevelTests extends AbstractNodesTests {
         try {
             client("node1").prepareIndex("test", "type1", "1").setSource(source("1", "test"))
                     .setConsistencyLevel(WriteConsistencyLevel.ALL)
-                    .setTimeout(timeValueSeconds(1)).execute().actionGet();
+                    .setTimeout(timeValueMillis(100)).execute().actionGet();
             assert false : "can't index, does not match consistency";
         } catch (UnavailableShardsException e) {
             // all is well
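
A note on the recurring pattern above: each fixed Thread.sleep(...) is replaced with an explicit wait on cluster membership, so a test resumes as soon as the expected nodes have joined instead of always paying the worst-case delay. The sketch below distills that pattern into a hypothetical helper; it assumes the client(...) accessor provided by the test harness (AbstractNodesTests/AbstractZenNodesTests) and Hamcrest's assertThat/equalTo, and the name waitForNodes is illustrative, not part of the patch.

    import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
    import static org.hamcrest.MatcherAssert.assertThat;
    import static org.hamcrest.Matchers.equalTo;

    // Hypothetical helper distilling the pattern used in the hunks above.
    private void waitForNodes(String viaNode, String expectedNodes) {
        // Blocks until the cluster seen from viaNode has the expected number
        // of nodes (e.g. "2" or "4"), or the default health timeout elapses.
        ClusterHealthResponse health = client(viaNode).admin().cluster()
                .prepareHealth()
                .setWaitForNodes(expectedNodes)
                .execute().actionGet();
        // Fail fast if the wait timed out, rather than continuing against a
        // half-formed cluster the way a fixed sleep silently would.
        assertThat(health.timedOut(), equalTo(false));
    }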