Saving some cycles here and there on the IO loop:

* Don't instantiate a new `Runnable` to execute on `SAME` in a few spots
* Don't instantiate a complicated wrapped stream for empty messages
* Stop instantiating the almost never used `ClusterStateObserver` in two spots
* Some minor cleanup and prevention of pointless `Predicate<>` instantiation in the transport master node action
parent a3a0c63ccf
commit 08dbd6d989
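To make the first bullet concrete: submitting work to the `SAME` executor still allocates a task object and goes through `Executor.execute`, even though the task ends up running on the calling thread anyway. Below is a minimal, self-contained sketch of the shortcut pattern the diff applies on both the request and response paths; the names (`SameExecutorShortcut`, `CheckedRunnable`, `handle`, the literal "same") are illustrative stand-ins, not the Elasticsearch API.

```java
import java.util.concurrent.ExecutorService;
import java.util.function.Consumer;

final class SameExecutorShortcut {

    /** Work that may fail, mirroring the transport request/response handlers. */
    @FunctionalInterface
    interface CheckedRunnable {
        void run() throws Exception;
    }

    // When the handler is registered for the calling-thread ("same") executor, invoke it
    // directly and report failures inline; only fork when a real thread pool is requested.
    static void handle(String executorName, ExecutorService pool,
                       CheckedRunnable handler, Consumer<Exception> onFailure) {
        if ("same".equals(executorName)) {
            try {
                handler.run();                      // no wrapper task, no executor dispatch
            } catch (Exception e) {
                onFailure.accept(e);
            }
        } else {
            pool.execute(() -> {                    // other executors keep the old forked path
                try {
                    handler.run();
                } catch (Exception e) {
                    onFailure.accept(e);
                }
            });
        }
    }
}
```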
TransportMasterNodeAction.java

@@ -107,35 +107,31 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
     @Override
     protected void doExecute(Task task, final Request request, ActionListener<Response> listener) {
-        new AsyncSingleAction(task, request, listener).start();
+        ClusterState state = clusterService.state();
+        logger.trace("starting processing request [{}] with cluster state version [{}]", request, state.version());
+        if (task != null) {
+            request.setParentTask(clusterService.localNode().getId(), task.getId());
+        }
+        new AsyncSingleAction(task, request, listener).doStart(state);
     }
 
     class AsyncSingleAction {
 
         private final ActionListener<Response> listener;
         private final Request request;
-        private volatile ClusterStateObserver observer;
+        private ClusterStateObserver observer;
+        private final long startTime;
         private final Task task;
 
         AsyncSingleAction(Task task, Request request, ActionListener<Response> listener) {
             this.task = task;
             this.request = request;
-            if (task != null) {
-                request.setParentTask(clusterService.localNode().getId(), task.getId());
-            }
             this.listener = listener;
-        }
-
-        public void start() {
-            ClusterState state = clusterService.state();
-            this.observer
-                = new ClusterStateObserver(state, clusterService, request.masterNodeTimeout(), logger, threadPool.getThreadContext());
-            doStart(state);
+            this.startTime = threadPool.relativeTimeInMillis();
         }
 
         protected void doStart(ClusterState clusterState) {
             try {
-                final Predicate<ClusterState> masterChangePredicate = MasterNodeChangePredicate.build(clusterState);
                 final DiscoveryNodes nodes = clusterState.nodes();
                 if (nodes.isLocalNodeElectedMaster() || localExecute(request)) {
                     // check for block, if blocked, retry, else, execute locally
@@ -144,8 +140,8 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
                     if (!blockException.retryable()) {
                         listener.onFailure(blockException);
                     } else {
-                        logger.trace("can't execute due to a cluster block, retrying", blockException);
-                        retry(blockException, newState -> {
+                        logger.debug("can't execute due to a cluster block, retrying", blockException);
+                        retry(clusterState, blockException, newState -> {
                             try {
                                 ClusterBlockException newException = checkBlock(request, newState);
                                 return (newException == null || !newException.retryable());
@@ -161,7 +157,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
                         if (t instanceof FailedToCommitClusterStateException || t instanceof NotMasterException) {
                             logger.debug(() -> new ParameterizedMessage("master could not publish cluster state or " +
                                 "stepped down before publishing action [{}], scheduling a retry", actionName), t);
-                            retry(t, masterChangePredicate);
+                            retryOnMasterChange(clusterState, t);
                         } else {
                             delegatedListener.onFailure(t);
                         }
@@ -172,7 +168,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
                 } else {
                     if (nodes.getMasterNode() == null) {
                         logger.debug("no known master node, scheduling a retry");
-                        retry(null, masterChangePredicate);
+                        retryOnMasterChange(clusterState, null);
                     } else {
                         DiscoveryNode masterNode = nodes.getMasterNode();
                         final String actionName = getMasterActionName(masterNode);
@@ -187,7 +183,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
                                 logger.debug("connection exception while trying to forward request with action name [{}] to " +
                                         "master node [{}], scheduling a retry. Error: [{}]",
                                     actionName, nodes.getMasterNode(), exp.getDetailedMessage());
-                                retry(cause, masterChangePredicate);
+                                retryOnMasterChange(clusterState, cause);
                             } else {
                                 listener.onFailure(exp);
                             }
@@ -200,7 +196,21 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
             }
         }
 
-        private void retry(final Throwable failure, final Predicate<ClusterState> statePredicate) {
+        private void retryOnMasterChange(ClusterState state, Throwable failure) {
+            retry(state, failure, MasterNodeChangePredicate.build(state));
+        }
+
+        private void retry(ClusterState state, final Throwable failure, final Predicate<ClusterState> statePredicate) {
+            if (observer == null) {
+                final long remainingTimeoutMS = request.masterNodeTimeout().millis() - (threadPool.relativeTimeInMillis() - startTime);
+                if (remainingTimeoutMS <= 0) {
+                    logger.debug(() -> new ParameterizedMessage("timed out before retrying [{}] after failure", actionName), failure);
+                    listener.onFailure(new MasterNotDiscoveredException(failure));
+                    return;
+                }
+                this.observer = new ClusterStateObserver(
+                        state, clusterService, TimeValue.timeValueMillis(remainingTimeoutMS), logger, threadPool.getThreadContext());
+            }
             observer.waitForNextChange(
                 new ClusterStateObserver.Listener() {
                     @Override
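The hunks above also remove two allocations from the common path: the master-change `Predicate` is now built only when a retry actually happens (`retryOnMasterChange`), and the `ClusterStateObserver` is created lazily on the first retry, with a timeout reduced by the time already spent. A rough, self-contained sketch of that lazy-construction idea, using placeholder types (`Observer`, a `LongSupplier` clock) instead of the real Elasticsearch classes:

```java
import java.util.function.LongSupplier;

// Placeholder for the expensive helper (ClusterStateObserver in the real code).
final class Observer {
    final long timeoutMillis;

    Observer(long timeoutMillis) {
        this.timeoutMillis = timeoutMillis;
    }
}

final class LazyRetry {
    private final long startTimeMillis;
    private final long timeoutMillis;
    private final LongSupplier clock;   // e.g. System::currentTimeMillis
    private Observer observer;          // created lazily, only if a retry is ever needed

    LazyRetry(long timeoutMillis, LongSupplier clock) {
        this.timeoutMillis = timeoutMillis;
        this.clock = clock;
        this.startTimeMillis = clock.getAsLong();
    }

    // Returns the observer to wait on, or null if the overall timeout already elapsed,
    // in which case the caller should fail fast instead of scheduling a hopeless wait.
    Observer observerForRetry() {
        if (observer == null) {
            long remaining = timeoutMillis - (clock.getAsLong() - startTimeMillis);
            if (remaining <= 0) {
                return null;
            }
            observer = new Observer(remaining);
        }
        return observer;
    }
}
```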
InboundHandler.java

@@ -22,7 +22,9 @@ package org.elasticsearch.transport;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.Version;
+import org.elasticsearch.common.io.stream.ByteBufferStreamInput;
 import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -33,6 +35,7 @@ import org.elasticsearch.threadpool.ThreadPool;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.nio.ByteBuffer;
 
 public class InboundHandler {
 
@@ -79,6 +82,9 @@ public class InboundHandler {
         }
     }
 
+    // Empty stream constant to avoid instantiating a new stream for empty messages.
+    private static final StreamInput EMPTY_STREAM_INPUT = new ByteBufferStreamInput(ByteBuffer.wrap(BytesRef.EMPTY_BYTES));
+
     private void messageReceived(TcpChannel channel, InboundMessage message) throws IOException {
         final InetSocketAddress remoteAddress = channel.getRemoteAddress();
         final Header header = message.getHeader();
@@ -94,8 +100,6 @@ public class InboundHandler {
         } else {
             // Responses do not support short circuiting currently
             assert message.isShortCircuit() == false;
-            final StreamInput streamInput = namedWriteableStream(message.openOrGetStreamInput());
-            assertRemoteVersion(streamInput, header.getVersion());
             final TransportResponseHandler<?> handler;
             long requestId = header.getRequestId();
             if (header.isHandshake()) {
@@ -111,6 +115,10 @@ public class InboundHandler {
             }
             // ignore if its null, the service logs it
             if (handler != null) {
+                final StreamInput streamInput;
+                if (message.getContentLength() > 0 || header.getVersion().equals(Version.CURRENT) == false) {
+                    streamInput = namedWriteableStream(message.openOrGetStreamInput());
+                    assertRemoteVersion(streamInput, header.getVersion());
                 if (header.isError()) {
                     handlerResponseError(streamInput, handler);
                 } else {
@@ -120,8 +128,13 @@ public class InboundHandler {
                 final int nextByte = streamInput.read();
                 // calling read() is useful to make sure the message is fully read, even if there is an EOS marker
                 if (nextByte != -1) {
-                    throw new IllegalStateException("Message not fully read (response) for requestId [" + requestId + "], handler ["
-                        + handler + "], error [" + header.isError() + "]; resetting");
+                    throw new IllegalStateException("Message not fully read (response) for requestId ["
+                        + requestId + "], handler [" + handler + "], error [" + header.isError()
+                        + "]; resetting");
+                    }
+                } else {
+                    assert header.isError() == false;
+                    handleResponse(remoteAddress, EMPTY_STREAM_INPUT, handler);
                 }
             }
         }
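The hunks above avoid building the wrapped, named-writeable stream when a response carries no payload: if the content length is zero and the peer is on the current wire version, there is nothing to deserialize, so a shared empty stream constant can be handed to the handler. A small sketch of the same guard using plain JDK streams; `EmptyPayloadShortcut`, `deliver`, and `Handler` are illustrative names, not part of the transport code:

```java
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;

final class EmptyPayloadShortcut {

    // Shared empty stream; reusable because handlers of empty messages never read from it.
    private static final DataInputStream EMPTY_STREAM =
        new DataInputStream(new ByteArrayInputStream(new byte[0]));

    interface Handler {
        void handle(DataInputStream in) throws Exception;
    }

    static void deliver(byte[] payload, boolean sameWireVersion, Handler handler) throws Exception {
        if (payload.length > 0 || sameWireVersion == false) {
            // Non-empty (or cross-version) messages still get a freshly wrapped stream.
            handler.handle(new DataInputStream(new ByteArrayInputStream(payload)));
        } else {
            // Empty message from a same-version peer: nothing to deserialize, reuse the constant.
            handler.handle(EMPTY_STREAM);
        }
    }
}
```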
@@ -173,12 +186,20 @@ public class InboundHandler {
                     throw new IllegalStateException("Message not fully read (request) for requestId [" + requestId + "], action ["
                         + action + "], available [" + stream.available() + "]; resetting");
                 }
-                threadPool.executor(reg.getExecutor()).execute(new RequestHandler<>(reg, request, transportChannel));
+                final String executor = reg.getExecutor();
+                if (ThreadPool.Names.SAME.equals(executor)) {
+                    try {
+                        reg.processMessageReceived(request, transportChannel);
+                    } catch (Exception e) {
+                        sendErrorResponse(reg.getAction(), transportChannel, e);
+                    }
+                } else {
+                    threadPool.executor(executor).execute(new RequestHandler<>(reg, request, transportChannel));
+                }
             } catch (Exception e) {
                 sendErrorResponse(action, transportChannel, e);
             }
 
         }
     }
 
@@ -202,17 +223,20 @@ public class InboundHandler {
                 "Failed to deserialize response from handler [" + handler + "]", e));
             return;
         }
-        threadPool.executor(handler.executor()).execute(new AbstractRunnable() {
-            @Override
-            public void onFailure(Exception e) {
-                handleException(handler, new ResponseHandlerFailureTransportException(e));
-            }
-
-            @Override
-            protected void doRun() {
-                handler.handleResponse(response);
-            }
-        });
+        final String executor = handler.executor();
+        if (ThreadPool.Names.SAME.equals(executor)) {
+            doHandleResponse(handler, response);
+        } else {
+            threadPool.executor(executor).execute(() -> doHandleResponse(handler, response));
+        }
+    }
+
+    private <T extends TransportResponse> void doHandleResponse(TransportResponseHandler<T> handler, T response) {
+        try {
+            handler.handleResponse(response);
+        } catch (Exception e) {
+            handleException(handler, new ResponseHandlerFailureTransportException(e));
+        }
     }
 
     private void handlerResponseError(StreamInput stream, final TransportResponseHandler<?> handler) {
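On the response side the same `SAME`-executor shortcut applies, with the `try`/`catch` pulled into a shared `doHandleResponse` helper so the inline and forked paths report handler failures identically. A hedged sketch of that shape with stand-in types (not the real `TransportResponseHandler`/`ThreadPool` API):

```java
import java.util.concurrent.ExecutorService;
import java.util.function.Consumer;

final class ResponseDispatch<T> {

    /** Stand-in for the transport response handler: handles a deserialized response on a named executor. */
    interface ResponseHandler<R> {
        void handleResponse(R response) throws Exception;
        String executor();
    }

    private final ExecutorService pool;
    private final Consumer<Exception> onFailure;

    ResponseDispatch(ExecutorService pool, Consumer<Exception> onFailure) {
        this.pool = pool;
        this.onFailure = onFailure;
    }

    void dispatch(ResponseHandler<T> handler, T response) {
        if ("same".equals(handler.executor())) {
            doHandleResponse(handler, response);                      // run inline on the IO thread
        } else {
            pool.execute(() -> doHandleResponse(handler, response));  // fork as before
        }
    }

    // Single home for the failure handling, shared by the inline and forked paths above.
    private void doHandleResponse(ResponseHandler<T> handler, T response) {
        try {
            handler.handleResponse(response);
        } catch (Exception e) {
            onFailure.accept(e);
        }
    }
}
```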