Logging: BroadcastOperationAction - added trace logging for successful shard-level responses

This makes it possible to trace exactly which shards participated in the operation.
Boaz Leskes 2014-11-15 18:32:58 +01:00
parent 983a108776
commit 37661aed60
2 changed files with 7 additions and 4 deletions
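
The added message uses the "{}" placeholder style visible in the hunk below, so the shard routing is only rendered to a string when TRACE is actually enabled for the logger. A minimal sketch of the same idiom, using a plain SLF4J logger rather than Elasticsearch's own logging wrapper (class and method names here are illustrative, not part of the change):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class ShardResponseTraceExample {
    private static final Logger logger = LoggerFactory.getLogger(ShardResponseTraceExample.class);

    // Stands in for the real onOperation(ShardRouting, int, ShardResponse) callback.
    void onShardResponse(Object shardRouting) {
        // The "{}" placeholder defers shardRouting.toString() until TRACE is enabled,
        // so the extra logging costs essentially nothing on the normal path.
        logger.trace("received response for {}", shardRouting);
    }
}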

TransportBroadcastOperationAction.java

@@ -211,6 +211,7 @@ public abstract class TransportBroadcastOperationAction<Request extends Broadcas
@SuppressWarnings({"unchecked"})
protected void onOperation(ShardRouting shard, int shardIndex, ShardResponse response) {
logger.trace("received response for {}", shard);
shardsResponses.set(shardIndex, response);
if (expectedOps == counterOps.incrementAndGet()) {
finishHim();
@@ -222,7 +223,6 @@ public abstract class TransportBroadcastOperationAction<Request extends Broadcas
// we set the shard failure always, even if its the first in the replication group, and the next one
// will work (it will just override it...)
setFailure(shardIt, shardIndex, t);
ShardRouting nextShard = shardIt.nextOrNull();
if (nextShard != null) {
if (t != null) {
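
For orientation, here is a simplified, hypothetical sketch of the per-shard accounting that the new trace line instruments. The names shardsResponses, counterOps, expectedOps and finishHim() come from the hunk above; the field types and the rest of the structure are assumptions, not the actual TransportBroadcastOperationAction implementation:

import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceArray;

class BroadcastAccountingSketch {
    private final int expectedOps;                          // one op per targeted shard
    private final AtomicInteger counterOps = new AtomicInteger();
    private final AtomicReferenceArray<Object> shardsResponses;

    BroadcastAccountingSketch(int expectedOps) {
        this.expectedOps = expectedOps;
        this.shardsResponses = new AtomicReferenceArray<>(expectedOps);
    }

    // Success path: record the shard-level response; with TRACE enabled this is
    // exactly the point where "received response for {shard}" is now logged.
    void onOperation(int shardIndex, Object response) {
        shardsResponses.set(shardIndex, response);
        if (expectedOps == counterOps.incrementAndGet()) {
            finishHim(); // every shard has reported back (success or failure)
        }
    }

    void finishHim() {
        // reduce the collected per-shard responses into the final broadcast response
    }
}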

FullRollingRestartTests.java

@@ -54,7 +54,7 @@ public class FullRollingRestartTests extends ElasticsearchIntegrationTest {
@Test
@Slow
@TestLogging("indices.cluster:TRACE,cluster.service:TRACE,action.search:TRACE,indices.recovery:TRACE")
@TestLogging("indices.cluster:TRACE,cluster.service:TRACE,action.count:TRACE,indices.recovery:TRACE")
public void testFullRollingRestart() throws Exception {
internalCluster().startNode();
createIndex("test");
@@ -69,13 +69,13 @@ public class FullRollingRestartTests extends ElasticsearchIntegrationTest {
.setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + i).map()).execute().actionGet();
}
- // now start adding nodes
+ logger.info("--> now start adding nodes");
internalCluster().startNodesAsync(2).get();
// make sure the cluster state is green, and all has been recovered
assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("3"));
- // now start adding nodes
+ logger.info("--> add two more nodes");
internalCluster().startNodesAsync(2).get();
// We now have 5 nodes
@@ -84,6 +84,7 @@ public class FullRollingRestartTests extends ElasticsearchIntegrationTest {
// make sure the cluster state is green, and all has been recovered
assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("5"));
logger.info("--> refreshing and checking data");
refresh();
for (int i = 0; i < 10; i++) {
assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 2000l);
@@ -101,6 +102,7 @@ public class FullRollingRestartTests extends ElasticsearchIntegrationTest {
// make sure the cluster state is green, and all has been recovered
assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("3"));
logger.info("--> stopped two nodes, verifying data");
refresh();
for (int i = 0; i < 10; i++) {
assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 2000l);
@@ -118,6 +120,7 @@ public class FullRollingRestartTests extends ElasticsearchIntegrationTest {
// make sure the cluster state is green, and all has been recovered
assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForYellowStatus().setWaitForRelocatingShards(0).setWaitForNodes("1"));
logger.info("--> one node left, verifying data");
refresh();
for (int i = 0; i < 10; i++) {
assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 2000l);
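
Because count requests go through the broadcast action, switching the traced logger from action.search to action.count means every prepareCount call in this test now emits one "received response for ..." line per shard that answered. A hedged sketch of reusing the same annotation in another integration test; the imports and helpers are assumed to come from the Elasticsearch 1.x test framework used above, and the class and method names are made up:

// Assumed imports (not shown, as in the diff above): org.junit.Test, ElasticsearchIntegrationTest,
// the @TestLogging annotation, MapBuilder, matchAllQuery() and assertHitCount().
public class CountTraceExampleTests extends ElasticsearchIntegrationTest {

    @Test
    @TestLogging("action.count:TRACE")
    public void testCountLogsParticipatingShards() throws Exception {
        createIndex("test");
        client().prepareIndex("test", "type1", "1")
                .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value1").map()).get();
        refresh();
        // Each shard copy that serves the count request should now be traced as
        // "received response for <shard routing>".
        assertHitCount(client().prepareCount("test").setQuery(matchAllQuery()).get(), 1l);
    }
}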