Log warnings when cluster state publication failed to some nodes (#31233)
If the publishing of a cluster state to a node fails, we currently only log it as debug information, and only on the master. This makes it hard to see the cause of (test) failures when logging is set to default levels. This PR adds a warn-level log on the node receiving the cluster state when it fails to deserialize that state, and a warn-level log on the master listing the nodes for which publication failed.
This commit is contained in:
parent
8b4d80ad09
commit
8c9360b5a1
|
@ -22,6 +22,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
|
|||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
@ -35,6 +36,7 @@ public class BlockingClusterStatePublishResponseHandler {
|
|||
|
||||
private final CountDownLatch latch;
|
||||
private final Set<DiscoveryNode> pendingNodes;
|
||||
private final Set<DiscoveryNode> failedNodes;
|
||||
|
||||
/**
|
||||
* Creates a new BlockingClusterStatePublishResponseHandler
|
||||
|
@ -44,6 +46,7 @@ public class BlockingClusterStatePublishResponseHandler {
|
|||
this.pendingNodes = ConcurrentCollections.newConcurrentSet();
|
||||
this.pendingNodes.addAll(publishingToNodes);
|
||||
this.latch = new CountDownLatch(pendingNodes.size());
|
||||
this.failedNodes = ConcurrentCollections.newConcurrentSet();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -64,6 +67,8 @@ public class BlockingClusterStatePublishResponseHandler {
|
|||
public void onFailure(DiscoveryNode node, Exception e) {
|
||||
boolean found = pendingNodes.remove(node);
|
||||
assert found : "node [" + node + "] already responded or failed";
|
||||
boolean added = failedNodes.add(node);
|
||||
assert added : "duplicate failures for " + node;
|
||||
latch.countDown();
|
||||
}
|
||||
|
||||
|
@ -86,4 +91,11 @@ public class BlockingClusterStatePublishResponseHandler {
|
|||
// nulls if some nodes responded in the meanwhile
|
||||
return pendingNodes.toArray(new DiscoveryNode[0]);
|
||||
}
|
||||
|
||||
/**
|
||||
* returns a set of nodes for which publication has failed.
|
||||
*/
|
||||
public Set<DiscoveryNode> getFailedNodes() {
|
||||
return Collections.unmodifiableSet(failedNodes);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,7 +20,6 @@
|
|||
package org.elasticsearch.discovery.zen;
|
||||
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.elasticsearch.core.internal.io.IOUtils;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
|
@ -41,6 +40,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
|
|||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.core.internal.io.IOUtils;
|
||||
import org.elasticsearch.discovery.AckClusterStatePublishResponseHandler;
|
||||
import org.elasticsearch.discovery.BlockingClusterStatePublishResponseHandler;
|
||||
import org.elasticsearch.discovery.Discovery;
|
||||
|
@ -207,6 +207,12 @@ public class PublishClusterStateAction extends AbstractComponent {
|
|||
clusterState.version(), publishTimeout, pendingNodes);
|
||||
}
|
||||
}
|
||||
// Each individual send failure was already logged at debug level; we now log a warn-level summary.
|
||||
Set<DiscoveryNode> failedNodes = publishResponseHandler.getFailedNodes();
|
||||
if (failedNodes.isEmpty() == false) {
|
||||
logger.warn("publishing cluster state with version [{}] failed for the following nodes: [{}]",
|
||||
clusterChangedEvent.state().version(), failedNodes);
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
// ignore & restore interrupt
|
||||
Thread.currentThread().interrupt();
|
||||
|
@ -367,14 +373,14 @@ public class PublishClusterStateAction extends AbstractComponent {
|
|||
protected void handleIncomingClusterStateRequest(BytesTransportRequest request, TransportChannel channel) throws IOException {
|
||||
Compressor compressor = CompressorFactory.compressor(request.bytes());
|
||||
StreamInput in = request.bytes().streamInput();
|
||||
try {
|
||||
if (compressor != null) {
|
||||
in = compressor.streamInput(in);
|
||||
}
|
||||
in = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry);
|
||||
in.setVersion(request.version());
|
||||
synchronized (lastSeenClusterStateMutex) {
|
||||
final ClusterState incomingState;
|
||||
final ClusterState incomingState;
|
||||
synchronized (lastSeenClusterStateMutex) {
|
||||
try {
|
||||
if (compressor != null) {
|
||||
in = compressor.streamInput(in);
|
||||
}
|
||||
in = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry);
|
||||
in.setVersion(request.version());
|
||||
// If true we received full cluster state - otherwise diffs
|
||||
if (in.readBoolean()) {
|
||||
incomingState = ClusterState.readFrom(in, transportService.getLocalNode());
|
||||
|
@ -391,14 +397,17 @@ public class PublishClusterStateAction extends AbstractComponent {
|
|||
logger.debug("received diff for but don't have any local cluster state - requesting full state");
|
||||
throw new IncompatibleClusterStateVersionException("have no local cluster state");
|
||||
}
|
||||
incomingClusterStateListener.onIncomingClusterState(incomingState);
|
||||
lastSeenClusterState = incomingState;
|
||||
} catch (IncompatibleClusterStateVersionException e) {
|
||||
incompatibleClusterStateDiffReceivedCount.incrementAndGet();
|
||||
throw e;
|
||||
} catch (Exception e) {
|
||||
logger.warn("unexpected error while deserializing an incoming cluster state", e);
|
||||
throw e;
|
||||
} finally {
|
||||
IOUtils.close(in);
|
||||
}
|
||||
} catch (IncompatibleClusterStateVersionException e) {
|
||||
incompatibleClusterStateDiffReceivedCount.incrementAndGet();
|
||||
throw e;
|
||||
} finally {
|
||||
IOUtils.close(in);
|
||||
incomingClusterStateListener.onIncomingClusterState(incomingState);
|
||||
lastSeenClusterState = incomingState;
|
||||
}
|
||||
channel.sendResponse(TransportResponse.Empty.INSTANCE);
|
||||
}
|
||||
|
|
|
@ -85,10 +85,16 @@ public class BlockingClusterStatePublishResponseHandlerTests extends ESTestCase
|
|||
int firstRound = randomIntBetween(5, nodeCount - 1);
|
||||
Thread[] threads = new Thread[firstRound];
|
||||
CyclicBarrier barrier = new CyclicBarrier(firstRound);
|
||||
Set<DiscoveryNode> expectedFailures = new HashSet<>();
|
||||
Set<DiscoveryNode> completedNodes = new HashSet<>();
|
||||
for (int i = 0; i < threads.length; i++) {
|
||||
completedNodes.add(allNodes[i]);
|
||||
threads[i] = new Thread(new PublishResponder(randomBoolean(), allNodes[i], barrier, logger, handler));
|
||||
final DiscoveryNode node = allNodes[i];
|
||||
completedNodes.add(node);
|
||||
final boolean fail = randomBoolean();
|
||||
if (fail) {
|
||||
expectedFailures.add(node);
|
||||
}
|
||||
threads[i] = new Thread(new PublishResponder(fail, node, barrier, logger, handler));
|
||||
threads[i].start();
|
||||
}
|
||||
// wait on the threads to finish
|
||||
|
@ -105,7 +111,12 @@ public class BlockingClusterStatePublishResponseHandlerTests extends ESTestCase
|
|||
barrier = new CyclicBarrier(secondRound);
|
||||
|
||||
for (int i = 0; i < threads.length; i++) {
|
||||
threads[i] = new Thread(new PublishResponder(randomBoolean(), allNodes[firstRound + i], barrier, logger, handler));
|
||||
final DiscoveryNode node = allNodes[firstRound + i];
|
||||
final boolean fail = randomBoolean();
|
||||
if (fail) {
|
||||
expectedFailures.add(node);
|
||||
}
|
||||
threads[i] = new Thread(new PublishResponder(fail, node, barrier, logger, handler));
|
||||
threads[i].start();
|
||||
}
|
||||
// wait on the threads to finish
|
||||
|
@ -114,6 +125,6 @@ public class BlockingClusterStatePublishResponseHandlerTests extends ESTestCase
|
|||
}
|
||||
assertTrue("expected handler not to timeout as all nodes responded", handler.awaitAllNodes(new TimeValue(10)));
|
||||
assertThat(handler.pendingNodes(), arrayWithSize(0));
|
||||
|
||||
assertThat(handler.getFailedNodes(), equalTo(expectedFailures));
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue