ARTEMIS-1025 Improve flow control on NettyConnection

(cherry picked from commit 27cfb2d902)

parent 7929fff893
commit 55f5124932
NettyConnection.java (org.apache.activemq.artemis.core.remoting.impl.netty)

@@ -17,22 +17,21 @@
 package org.apache.activemq.artemis.core.remoting.impl.netty;
 
 import java.net.SocketAddress;
-import java.util.Deque;
-import java.util.LinkedList;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Map;
-import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.LockSupport;
 
 import io.netty.buffer.ByteBuf;
-import io.netty.buffer.PooledByteBufAllocator;
 import io.netty.channel.Channel;
 import io.netty.channel.ChannelFuture;
 import io.netty.channel.ChannelFutureListener;
 import io.netty.channel.ChannelPromise;
 import io.netty.channel.EventLoop;
 import io.netty.handler.ssl.SslHandler;
-import io.netty.util.concurrent.GenericFutureListener;
 import org.apache.activemq.artemis.api.core.ActiveMQBuffer;
-import org.apache.activemq.artemis.api.core.ActiveMQBuffers;
 import org.apache.activemq.artemis.api.core.ActiveMQInterruptedException;
 import org.apache.activemq.artemis.api.core.TransportConfiguration;
 import org.apache.activemq.artemis.core.buffers.impl.ChannelBufferWrapper;
@@ -43,43 +42,41 @@ import org.apache.activemq.artemis.spi.core.remoting.BaseConnectionLifeCycleListener;
 import org.apache.activemq.artemis.spi.core.remoting.Connection;
 import org.apache.activemq.artemis.spi.core.remoting.ReadyListener;
 import org.apache.activemq.artemis.utils.IPV6Util;
+import org.jboss.logging.Logger;
 
 public class NettyConnection implements Connection {
 
-   // Constants -----------------------------------------------------
-   private static final int BATCHING_BUFFER_SIZE = 8192;
+   private static final Logger logger = Logger.getLogger(NettyConnection.class);
 
-   // Attributes ----------------------------------------------------
+   private static final int DEFAULT_BATCH_BYTES = Integer.getInteger("io.netty.batch.bytes", 8192);
+
+   private static final int DEFAULT_WAIT_MILLIS = 10_000;
 
    protected final Channel channel;
 
-   private boolean closed;
-
    private final BaseConnectionLifeCycleListener listener;
 
-   private final boolean batchingEnabled;
-
    private final boolean directDeliver;
 
-   private volatile ActiveMQBuffer batchBuffer;
-
    private final Map<String, Object> configuration;
 
-   private final Semaphore writeLock = new Semaphore(1);
-
-   private RemotingConnection protocolConnection;
-
-   private boolean ready = true;
-
    /**
    * if {@link #isWritable(ReadyListener)} returns false, we add a callback
    * here for when the connection (or Netty Channel) becomes available again.
    */
-   private final Deque<ReadyListener> readyListeners = new LinkedList<>();
+   private final List<ReadyListener> readyListeners = new ArrayList<>();
+
+   private final ThreadLocal<ArrayList<ReadyListener>> localListenersPool = ThreadLocal.withInitial(ArrayList::new);
 
-   // Static --------------------------------------------------------
+   private final boolean batchingEnabled;
+
+   private final int writeBufferHighWaterMark;
+
+   private final int batchLimit;
 
-   // Constructors --------------------------------------------------
+   /**
+   * This counter is splitted in 2 variables to write it with less performance
+   * impact: no volatile get is required to update its value
+   */
+   private final AtomicLong pendingWritesOnEventLoopView = new AtomicLong();
+
+   private long pendingWritesOnEventLoop = 0;
+
+   private boolean closed;
+
+   private RemotingConnection protocolConnection;
+
+   private boolean ready = true;
 
    public NettyConnection(final Map<String, Object> configuration,
                           final Channel channel,
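Note on the pending-writes counter introduced above: it is deliberately split into a plain long, touched only by the event loop thread, plus an AtomicLong "view" published with lazySet, so the hot path pays no volatile load before updating. A minimal standalone sketch of this single-writer pattern (class and method names here are illustrative, not part of the patch):

    import java.util.concurrent.atomic.AtomicLong;

    // Illustrative sketch: the plain field is read and written only by the
    // owning (event loop) thread, while the AtomicLong publishes its value to
    // other threads. lazySet avoids the full memory fence of a volatile store.
    final class SingleWriterCounter {

       private long value = 0;                           // owner-thread only
       private final AtomicLong view = new AtomicLong(); // readable from any thread

       // must be called only from the owner thread
       void add(long delta) {
          value += delta;
          view.lazySet(value);
       }

       // safe from any thread; may slightly lag the owner's latest update
       long read() {
          return view.get();
       }
    }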
@@ -92,28 +89,72 @@ public class NettyConnection implements Connection {
 
       this.listener = listener;
 
+      this.directDeliver = directDeliver;
+
       this.batchingEnabled = batchingEnabled;
 
-      this.directDeliver = directDeliver;
+      this.writeBufferHighWaterMark = this.channel.config().getWriteBufferHighWaterMark();
+
+      this.batchLimit = batchingEnabled ? Math.min(this.writeBufferHighWaterMark, DEFAULT_BATCH_BYTES) : 0;
    }
 
-   // Public --------------------------------------------------------
+   private static void waitFor(ChannelPromise promise, long millis) {
+      try {
+         final boolean completed = promise.await(millis);
+         if (!completed) {
+            ActiveMQClientLogger.LOGGER.timeoutFlushingPacket();
+         }
+      } catch (InterruptedException e) {
+         throw new ActiveMQInterruptedException(e);
+      }
+   }
 
-   public Channel getNettyChannel() {
+   /**
+    * Returns an estimation of the current size of the write buffer in the channel.
+    * To obtain a more precise value is necessary to use the unsafe API of the channel to
+    * call the {@link io.netty.channel.ChannelOutboundBuffer#totalPendingWriteBytes()}.
+    * Anyway, both these values are subject to concurrent modifications.
+    */
+   private static int batchBufferSize(Channel channel, int writeBufferHighWaterMark) {
+      //Channel::bytesBeforeUnwritable is performing a volatile load
+      //this is the reason why writeBufferHighWaterMark is passed as an argument
+      final int bytesBeforeUnwritable = (int) channel.bytesBeforeUnwritable();
+      assert bytesBeforeUnwritable >= 0;
+      final int writtenBytes = writeBufferHighWaterMark - bytesBeforeUnwritable;
+      assert writtenBytes >= 0;
+      return writtenBytes;
+   }
+
+   public final int pendingWritesOnChannel() {
+      return batchBufferSize(this.channel, this.writeBufferHighWaterMark);
+   }
+
+   public final long pendingWritesOnEventLoop() {
+      final EventLoop eventLoop = channel.eventLoop();
+      final boolean inEventLoop = eventLoop.inEventLoop();
+      final long pendingWritesOnEventLoop;
+      if (inEventLoop) {
+         pendingWritesOnEventLoop = this.pendingWritesOnEventLoop;
+      } else {
+         pendingWritesOnEventLoop = pendingWritesOnEventLoopView.get();
+      }
+      return pendingWritesOnEventLoop;
+   }
+
+   public final Channel getNettyChannel() {
       return channel;
    }
-   // Connection implementation ----------------------------
 
    @Override
-   public void setAutoRead(boolean autoRead) {
+   public final void setAutoRead(boolean autoRead) {
       channel.config().setAutoRead(autoRead);
    }
 
    @Override
-   public boolean isWritable(ReadyListener callback) {
+   public final boolean isWritable(ReadyListener callback) {
       synchronized (readyListeners) {
          if (!ready) {
-            readyListeners.push(callback);
+            readyListeners.add(callback);
          }
 
          return ready;
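The batchBufferSize(...) helper above estimates the queued bytes as the complement of Channel#bytesBeforeUnwritable() against the configured high water mark: for example, with Netty's usual 64 KiB high water mark and 24 KiB still writable, roughly 40 KiB are queued. A hypothetical monitoring helper (not part of the patch) combining the two views it feeds:

    import org.apache.activemq.artemis.core.remoting.impl.netty.NettyConnection;

    // Hypothetical helper mirroring the heuristic canWrite(...) applies later
    // in this commit: bytes already in Netty's outbound buffer plus bytes held
    // by write tasks that have not yet run on the event loop.
    final class PendingWrites {

       static long total(NettyConnection connection) {
          final long onChannel = connection.pendingWritesOnChannel();     // already queued in Netty
          final long onEventLoop = connection.pendingWritesOnEventLoop(); // still in pending tasks
          return onChannel + onEventLoop;
       }
    }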
@@ -121,40 +162,44 @@ public class NettyConnection implements Connection {
       }
    }
 
    @Override
-   public void fireReady(final boolean ready) {
-      LinkedList<ReadyListener> readyToCall = null;
+   public final void fireReady(final boolean ready) {
+      final ArrayList<ReadyListener> readyToCall = localListenersPool.get();
       synchronized (readyListeners) {
          this.ready = ready;
 
          if (ready) {
-            for (;;) {
-               ReadyListener readyListener = readyListeners.poll();
-               if (readyListener == null) {
-                  break;
-               }
-
-               if (readyToCall == null) {
-                  readyToCall = new LinkedList<>();
-               }
-
-               readyToCall.add(readyListener);
-            }
+            final int size = this.readyListeners.size();
+            readyToCall.ensureCapacity(size);
+            try {
+               for (int i = 0; i < size; i++) {
+                  final ReadyListener readyListener = readyListeners.get(i);
+                  if (readyListener == null) {
+                     break;
+                  }
+
+                  readyToCall.add(readyListener);
+               }
+            } finally {
+               readyListeners.clear();
+            }
          }
       }
 
-      if (readyToCall != null) {
-         for (ReadyListener readyListener : readyToCall) {
-            try {
-               readyListener.readyForWriting();
-            } catch (Throwable logOnly) {
-               ActiveMQClientLogger.LOGGER.warn(logOnly.getMessage(), logOnly);
-            }
-         }
-      }
+      try {
+         final int size = readyToCall.size();
+         for (int i = 0; i < size; i++) {
+            try {
+               final ReadyListener readyListener = readyToCall.get(i);
+               readyListener.readyForWriting();
+            } catch (Throwable logOnly) {
+               ActiveMQClientLogger.LOGGER.warn(logOnly.getMessage(), logOnly);
+            }
+         }
+      } finally {
+         readyToCall.clear();
+      }
    }
 
    @Override
-   public void forceClose() {
+   public final void forceClose() {
       if (channel != null) {
          try {
            channel.close();
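fireReady(...) now drains listeners into a thread-local ArrayList that is reused across calls instead of allocating a LinkedList per invocation, and the callbacks still run outside the lock. A minimal sketch of the pattern, with illustrative names rather than the patch's own:

    import java.util.ArrayList;

    // Callbacks are copied out under the lock, run outside it, and the
    // thread-local scratch list is cleared for reuse instead of reallocated.
    final class CallbackDrain {

       private final ArrayList<Runnable> callbacks = new ArrayList<>();
       private final ThreadLocal<ArrayList<Runnable>> scratchPool =
          ThreadLocal.withInitial(ArrayList::new);

       void fire() {
          final ArrayList<Runnable> toCall = scratchPool.get();
          synchronized (callbacks) {
             toCall.addAll(callbacks);
             callbacks.clear();
          }
          try {
             for (int i = 0; i < toCall.size(); i++) {
                toCall.get(i).run();
             }
          } finally {
             toCall.clear(); // leave the pooled list empty for the next call
          }
       }
    }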
@@ -169,38 +214,35 @@ public class NettyConnection implements Connection {
     *
     * @return
     */
-   public Channel getChannel() {
+   public final Channel getChannel() {
       return channel;
    }
 
    @Override
-   public RemotingConnection getProtocolConnection() {
+   public final RemotingConnection getProtocolConnection() {
       return protocolConnection;
    }
 
    @Override
-   public void setProtocolConnection(RemotingConnection protocolConnection) {
+   public final void setProtocolConnection(RemotingConnection protocolConnection) {
       this.protocolConnection = protocolConnection;
    }
 
    @Override
-   public void close() {
+   public final void close() {
       if (closed) {
          return;
       }
 
-      final SslHandler sslHandler = (SslHandler) channel.pipeline().get("ssl");
       EventLoop eventLoop = channel.eventLoop();
       boolean inEventLoop = eventLoop.inEventLoop();
       //if we are in an event loop we need to close the channel after the writes have finished
       if (!inEventLoop) {
+         final SslHandler sslHandler = (SslHandler) channel.pipeline().get("ssl");
          closeSSLAndChannel(sslHandler, channel, false);
       } else {
-         eventLoop.execute(new Runnable() {
-            @Override
-            public void run() {
-               closeSSLAndChannel(sslHandler, channel, true);
-            }
+         eventLoop.execute(() -> {
+            final SslHandler sslHandler = (SslHandler) channel.pipeline().get("ssl");
+            closeSSLAndChannel(sslHandler, channel, true);
          });
       }
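Worth noting in the close() rewrite above: the SslHandler pipeline lookup moved inside each branch, so when closing from another thread the handler is resolved immediately, while in the event-loop branch it is resolved only when the submitted lambda actually runs, after the pending writes have drained.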
@@ -211,143 +253,209 @@ public class NettyConnection implements Connection {
 
    @Override
    public ActiveMQBuffer createTransportBuffer(final int size) {
-      return new ChannelBufferWrapper(PooledByteBufAllocator.DEFAULT.directBuffer(size), true);
+      try {
+         return new ChannelBufferWrapper(channel.alloc().directBuffer(size), true);
+      } catch (OutOfMemoryError oom) {
+         final long totalPendingWriteBytes = batchBufferSize(this.channel, this.writeBufferHighWaterMark);
+         // I'm not using the ActiveMQLogger framework here, as I wanted the class name to be very specific here
+         logger.warn("Trying to allocate " + size + " bytes, System is throwing OutOfMemoryError on NettyConnection " + this + ", there are currently " + "pendingWrites: [NETTY] -> " + totalPendingWriteBytes + "[EVENT LOOP] -> " + pendingWritesOnEventLoopView.get() + " causes: " + oom.getMessage(), oom);
+         throw oom;
+      }
    }
 
    @Override
-   public Object getID() {
+   public final Object getID() {
       // TODO: Think of it
       return channel.hashCode();
    }
 
    // This is called periodically to flush the batch buffer
    @Override
-   public void checkFlushBatchBuffer() {
-      if (!batchingEnabled) {
-         return;
-      }
-
-      if (writeLock.tryAcquire()) {
-         try {
-            if (batchBuffer != null && batchBuffer.readable()) {
-               channel.writeAndFlush(batchBuffer.byteBuf());
-
-               batchBuffer = createTransportBuffer(BATCHING_BUFFER_SIZE);
-            }
-         } finally {
-            writeLock.release();
+   public final void checkFlushBatchBuffer() {
+      if (this.batchingEnabled) {
+         //perform the flush only if necessary
+         final int batchBufferSize = batchBufferSize(this.channel, this.writeBufferHighWaterMark);
+         if (batchBufferSize > 0) {
+            this.channel.flush();
          }
       }
    }
 
    @Override
-   public void write(final ActiveMQBuffer buffer) {
+   public final void write(final ActiveMQBuffer buffer) {
       write(buffer, false, false);
    }
 
    @Override
-   public void write(ActiveMQBuffer buffer, final boolean flush, final boolean batched) {
+   public final void write(ActiveMQBuffer buffer, final boolean flush, final boolean batched) {
       write(buffer, flush, batched, null);
    }
 
    @Override
-   public void write(ActiveMQBuffer buffer,
-                     final boolean flush,
-                     final boolean batched,
-                     final ChannelFutureListener futureListener) {
-      try {
-         writeLock.acquire();
-
-         try {
-            if (batchBuffer == null && batchingEnabled && batched && !flush) {
-               // Lazily create batch buffer
-               batchBuffer = ActiveMQBuffers.dynamicBuffer(BATCHING_BUFFER_SIZE);
-            }
-
-            if (batchBuffer != null) {
-               batchBuffer.writeBytes(buffer, 0, buffer.writerIndex());
-
-               if (batchBuffer.writerIndex() >= BATCHING_BUFFER_SIZE || !batched || flush) {
-                  // If the batch buffer is full or it's flush param or not batched then flush the buffer
-                  buffer = batchBuffer;
-               } else {
-                  return;
-               }
-
-               if (!batched || flush) {
-                  batchBuffer = null;
-               } else {
-                  // Create a new buffer
-                  batchBuffer = ActiveMQBuffers.dynamicBuffer(BATCHING_BUFFER_SIZE);
-               }
-            }
-
-            // depending on if we need to flush or not we can use a voidPromise or
-            // use a normal promise
-            final ByteBuf buf = buffer.byteBuf();
-            final ChannelPromise promise;
-            if (flush || futureListener != null) {
-               promise = channel.newPromise();
-            } else {
-               promise = channel.voidPromise();
-            }
-
-            EventLoop eventLoop = channel.eventLoop();
-            boolean inEventLoop = eventLoop.inEventLoop();
-            if (!inEventLoop) {
-               if (futureListener != null) {
-                  channel.writeAndFlush(buf, promise).addListener(futureListener);
-               } else {
-                  channel.writeAndFlush(buf, promise);
-               }
-            } else {
-               // create a task which will be picked up by the eventloop and trigger the write.
-               // This is mainly needed as this method is triggered by different threads for the same channel.
-               // if we not do this we may produce out of order writes.
-               final Runnable task = new Runnable() {
-                  @Override
-                  public void run() {
-                     if (futureListener != null) {
-                        channel.writeAndFlush(buf, promise).addListener(futureListener);
-                     } else {
-                        channel.writeAndFlush(buf, promise);
-                     }
-                  }
-               };
-               // execute the task on the eventloop
-               eventLoop.execute(task);
-            }
-
-            // only try to wait if not in the eventloop otherwise we will produce a deadlock
-            if (flush && !inEventLoop) {
-               while (true) {
-                  try {
-                     boolean ok = promise.await(10000);
-
-                     if (!ok) {
-                        ActiveMQClientLogger.LOGGER.timeoutFlushingPacket();
-                     }
-
-                     break;
-                  } catch (InterruptedException e) {
-                     throw new ActiveMQInterruptedException(e);
-                  }
-               }
-            }
-         } finally {
-            writeLock.release();
-         }
-      } catch (InterruptedException e) {
-         throw new ActiveMQInterruptedException(e);
-      }
-   }
+   public final boolean blockUntilWritable(final int requiredCapacity, final long timeout, final TimeUnit timeUnit) {
+      final boolean isAllowedToBlock = isAllowedToBlock();
+      if (!isAllowedToBlock) {
+         if (logger.isDebugEnabled()) {
+            logger.debug("Calling blockUntilWritable using a thread where it's not allowed");
+         }
+         return canWrite(requiredCapacity);
+      } else {
+         final long timeoutNanos = timeUnit.toNanos(timeout);
+         final long deadline = System.nanoTime() + timeoutNanos;
+         //choose wait time unit size
+         final long parkNanos;
+         //if is requested to wait more than a millisecond than we could use
+         if (timeoutNanos >= 1_000_000L) {
+            parkNanos = 100_000L;
+         } else {
+            //reduce it doesn't make sense, only a spin loop could be enough precise with the most OS
+            parkNanos = 1000L;
+         }
+         boolean canWrite;
+         while (!(canWrite = canWrite(requiredCapacity)) && System.nanoTime() < deadline) {
+            LockSupport.parkNanos(parkNanos);
+         }
+         return canWrite;
+      }
+   }
+
+   private boolean isAllowedToBlock() {
+      final EventLoop eventLoop = channel.eventLoop();
+      final boolean inEventLoop = eventLoop.inEventLoop();
+      return !inEventLoop;
+   }
+
+   private boolean canWrite(final int requiredCapacity) {
+      //evaluate if the write request could be taken:
+      //there is enough space in the write buffer?
+      //The pending writes on event loop will eventually go into the Netty write buffer, hence consider them
+      //as part of the heuristic!
+      final long pendingWritesOnEventLoop = this.pendingWritesOnEventLoop();
+      final long totalPendingWrites = pendingWritesOnEventLoop + this.pendingWritesOnChannel();
+      final boolean canWrite;
+      if (requiredCapacity > this.writeBufferHighWaterMark) {
+         canWrite = totalPendingWrites == 0;
+      } else {
+         canWrite = (totalPendingWrites + requiredCapacity) <= this.writeBufferHighWaterMark;
+      }
+      return canWrite;
+   }
+
+   @Override
+   public final void write(ActiveMQBuffer buffer,
+                           final boolean flush,
+                           final boolean batched,
+                           final ChannelFutureListener futureListener) {
+      final int readableBytes = buffer.readableBytes();
+      if (logger.isDebugEnabled()) {
+         final int remainingBytes = this.writeBufferHighWaterMark - readableBytes;
+         if (remainingBytes < 0) {
+            logger.debug("a write request is exceeding by " + (-remainingBytes) +
+                            " bytes the writeBufferHighWaterMark size [ " + this.writeBufferHighWaterMark +
+                            " ] : consider to set it at least of " + readableBytes + " bytes");
+         }
+      }
+      //no need to lock because the Netty's channel is thread-safe
+      //and the order of write is ensured by the order of the write calls
+      final EventLoop eventLoop = channel.eventLoop();
+      final boolean inEventLoop = eventLoop.inEventLoop();
+      if (!inEventLoop) {
+         writeNotInEventLoop(buffer, flush, batched, futureListener);
+      } else {
+         // OLD COMMENT:
+         // create a task which will be picked up by the eventloop and trigger the write.
+         // This is mainly needed as this method is triggered by different threads for the same channel.
+         // if we not do this we may produce out of order writes.
+         // NOTE:
+         // the submitted task does not effect in any way the current written size in the batch
+         // until the loop will process it, leading to a longer life for the ActiveMQBuffer buffer!!!
+         // To solve it, will be necessary to manually perform the count of the current batch instead of rely on the
+         // Channel:Config::writeBufferHighWaterMark value.
+         this.pendingWritesOnEventLoop += readableBytes;
+         this.pendingWritesOnEventLoopView.lazySet(pendingWritesOnEventLoop);
+         eventLoop.execute(() -> {
+            this.pendingWritesOnEventLoop -= readableBytes;
+            this.pendingWritesOnEventLoopView.lazySet(pendingWritesOnEventLoop);
+            writeInEventLoop(buffer, flush, batched, futureListener);
+         });
+      }
+   }
+
+   private void writeNotInEventLoop(ActiveMQBuffer buffer,
+                                    final boolean flush,
+                                    final boolean batched,
+                                    final ChannelFutureListener futureListener) {
+      final Channel channel = this.channel;
+      final ChannelPromise promise;
+      if (flush || (futureListener != null)) {
+         promise = channel.newPromise();
+      } else {
+         promise = channel.voidPromise();
+      }
+      final ChannelFuture future;
+      final ByteBuf bytes = buffer.byteBuf();
+      final int readableBytes = bytes.readableBytes();
+      assert readableBytes >= 0;
+      final int writeBatchSize = this.batchLimit;
+      final boolean batchingEnabled = this.batchingEnabled;
+      if (batchingEnabled && batched && !flush && readableBytes < writeBatchSize) {
+         future = writeBatch(bytes, readableBytes, promise);
+      } else {
+         future = channel.writeAndFlush(bytes, promise);
+      }
+      if (futureListener != null) {
+         future.addListener(futureListener);
+      }
+      if (flush) {
+         //NOTE: this code path seems used only on RemotingConnection::disconnect
+         waitFor(promise, DEFAULT_WAIT_MILLIS);
+      }
+   }
+
+   private void writeInEventLoop(ActiveMQBuffer buffer,
+                                 final boolean flush,
+                                 final boolean batched,
+                                 final ChannelFutureListener futureListener) {
+      //no need to lock because the Netty's channel is thread-safe
+      //and the order of write is ensured by the order of the write calls
+      final ChannelPromise promise;
+      if (futureListener != null) {
+         promise = channel.newPromise();
+      } else {
+         promise = channel.voidPromise();
+      }
+      final ChannelFuture future;
+      final ByteBuf bytes = buffer.byteBuf();
+      final int readableBytes = bytes.readableBytes();
+      final int writeBatchSize = this.batchLimit;
+      if (this.batchingEnabled && batched && !flush && readableBytes < writeBatchSize) {
+         future = writeBatch(bytes, readableBytes, promise);
+      } else {
+         future = channel.writeAndFlush(bytes, promise);
+      }
+      if (futureListener != null) {
+         future.addListener(futureListener);
+      }
+   }
+
+   private ChannelFuture writeBatch(final ByteBuf bytes, final int readableBytes, final ChannelPromise promise) {
+      final int batchBufferSize = batchBufferSize(channel, this.writeBufferHighWaterMark);
+      final int nextBatchSize = batchBufferSize + readableBytes;
+      if (nextBatchSize > batchLimit) {
+         //request to flush before writing, to create the chance to make the channel writable again
+         channel.flush();
+         //let netty use its write batching ability
+         return channel.write(bytes, promise);
+      } else if (nextBatchSize == batchLimit) {
+         return channel.writeAndFlush(bytes, promise);
+      } else {
+         //let netty use its write batching ability
+         return channel.write(bytes, promise);
+      }
+   }
 
    @Override
-   public String getRemoteAddress() {
+   public final String getRemoteAddress() {
       SocketAddress address = channel.remoteAddress();
       if (address == null) {
          return null;
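Taken together, blockUntilWritable(...) plus canWrite(...) give producers a way to back off before writing rather than queueing bytes onto an unwritable channel. A hypothetical caller-side sketch (the helper name and the one-second timeout are illustrative, not from the patch):

    import java.util.concurrent.TimeUnit;

    import org.apache.activemq.artemis.api.core.ActiveMQBuffer;
    import org.apache.activemq.artemis.spi.core.remoting.Connection;

    // Back off before writing instead of piling bytes onto an unwritable connection.
    final class FlowControlledWriter {

       static boolean tryWrite(Connection connection, ActiveMQBuffer packet) {
          final int bytes = packet.readableBytes();
          // wait up to one second for room to enqueue `bytes`
          if (!connection.blockUntilWritable(bytes, 1, TimeUnit.SECONDS)) {
             return false; // still no room: let the caller drop, retry or throttle
          }
          connection.write(packet, false, true);
          return true;
       }
    }

The check-then-write is not atomic, which is why the new javadoc on Connection#blockUntilWritable (further down in this commit) describes it as precise only in the single-writer case.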
@@ -356,7 +464,7 @@ public class NettyConnection implements Connection {
    }
 
    @Override
-   public String getLocalAddress() {
+   public final String getLocalAddress() {
       SocketAddress address = channel.localAddress();
       if (address == null) {
          return null;
@@ -364,18 +472,18 @@ public class NettyConnection implements Connection {
       return "tcp://" + IPV6Util.encloseHost(address.toString());
    }
 
-   public boolean isDirectDeliver() {
+   public final boolean isDirectDeliver() {
       return directDeliver;
    }
 
    //never allow this
    @Override
-   public ActiveMQPrincipal getDefaultActiveMQPrincipal() {
+   public final ActiveMQPrincipal getDefaultActiveMQPrincipal() {
       return null;
    }
 
    @Override
-   public TransportConfiguration getConnectorConfig() {
+   public final TransportConfiguration getConnectorConfig() {
       if (configuration != null) {
          return new TransportConfiguration(NettyConnectorFactory.class.getName(), this.configuration);
       } else {
@@ -384,46 +492,36 @@ public class NettyConnection implements Connection {
    }
 
    @Override
-   public boolean isUsingProtocolHandling() {
+   public final boolean isUsingProtocolHandling() {
       return true;
    }
 
-   // Public --------------------------------------------------------
-
    @Override
-   public String toString() {
+   public final String toString() {
       return super.toString() + "[local= " + channel.localAddress() + ", remote=" + channel.remoteAddress() + "]";
    }
 
-   // Package protected ---------------------------------------------
-
-   // Protected -----------------------------------------------------
-
-   // Private -------------------------------------------------------
-
    private void closeSSLAndChannel(SslHandler sslHandler, final Channel channel, boolean inEventLoop) {
+      checkFlushBatchBuffer();
       if (sslHandler != null) {
          try {
            ChannelFuture sslCloseFuture = sslHandler.close();
-            sslCloseFuture.addListener(new GenericFutureListener<ChannelFuture>() {
-               @Override
-               public void operationComplete(ChannelFuture future) throws Exception {
-                  channel.close();
-               }
-            });
-            if (!inEventLoop && !sslCloseFuture.awaitUninterruptibly(10000)) {
+            sslCloseFuture.addListener(future -> channel.close());
+            if (!inEventLoop && !sslCloseFuture.awaitUninterruptibly(DEFAULT_WAIT_MILLIS)) {
               ActiveMQClientLogger.LOGGER.timeoutClosingSSL();
            }
         } catch (Throwable t) {
            // ignore
+            if (ActiveMQClientLogger.LOGGER.isTraceEnabled()) {
+               ActiveMQClientLogger.LOGGER.trace(t.getMessage(), t);
+            }
         }
      } else {
         ChannelFuture closeFuture = channel.close();
-         if (!inEventLoop && !closeFuture.awaitUninterruptibly(10000)) {
+         if (!inEventLoop && !closeFuture.awaitUninterruptibly(DEFAULT_WAIT_MILLIS)) {
            ActiveMQClientLogger.LOGGER.timeoutClosingNettyChannel();
         }
      }
   }
-   // Inner classes -------------------------------------------------
 
 }
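A worked example of the writeBatch(...) thresholds above, assuming the default batchLimit of 8192 bytes: with an estimated 6000 bytes already sitting in the channel, a 3000-byte frame gives nextBatchSize = 9000 > 8192, so the channel is flushed first and the frame is queued with channel.write(...); a 2192-byte frame lands exactly on 8192 and goes out with writeAndFlush(...); anything smaller is simply queued, letting Netty apply its own write batching.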
Connection.java (org.apache.activemq.artemis.spi.core.remoting)

@@ -16,6 +16,8 @@
  */
 package org.apache.activemq.artemis.spi.core.remoting;
 
+import java.util.concurrent.TimeUnit;
+
 import io.netty.channel.ChannelFutureListener;
 import org.apache.activemq.artemis.api.core.ActiveMQBuffer;
 import org.apache.activemq.artemis.api.core.TransportConfiguration;
@@ -41,6 +43,21 @@ public interface Connection {
 
    boolean isWritable(ReadyListener listener);
 
+   /**
+    * Causes the current thread to wait until the connection can enqueue the required capacity unless the specified waiting time elapses.
+    * The available capacity of the connection could change concurrently hence this method is suitable to perform precise flow-control
+    * only in a single writer case, while its precision decrease inversely proportional with the rate and the number of concurrent writers.
+    * If the current thread is not allowed to block the timeout will be ignored dependently on the connection type.
+    *
+    * @param requiredCapacity the capacity in bytes to be enqueued
+    * @param timeout          the maximum time to wait
+    * @param timeUnit         the time unit of the timeout argument
+    * @return {@code true} if the connection can enqueue {@code requiredCapacity} bytes, {@code false} otherwise
+    */
+   default boolean blockUntilWritable(final int requiredCapacity, final long timeout, final TimeUnit timeUnit) {
+      return true;
+   }
+
    void fireReady(boolean ready);
 
    /**
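Since the new interface method has a default implementation that simply returns true, transports that never become unwritable, or that predate this change, keep their old behaviour; NettyConnection is the one implementation in this commit that overrides it, with the park-and-retry loop shown earlier.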