diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java
index a196976dc12..577038a54a0 100644
--- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java
+++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java
@@ -334,6 +334,11 @@ public class Netty4Transport extends TcpTransport {
     protected void closeChannels(final List<Channel> channels, boolean blocking, boolean closingTransport) throws IOException {
         if (closingTransport) {
             for (Channel channel : channels) {
+                /* We set the SO_LINGER timeout to 0 to ensure that when we shut down the node we don't have a gazillion connections
+                 * sitting in TIME_WAIT, so that resources are freed up quickly. This is really the only place where we close the
+                 * connection from the server side; otherwise the client (node) initiates the TCP closing sequence, which doesn't cause
+                 * these issues. Setting this by default from the beginning can have unexpected side-effects and should be avoided; our
+                 * protocol is designed so that clients close the connection, which is how it should be. */
                 channel.config().setOption(ChannelOption.SO_LINGER, 0);
             }
         }
diff --git a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java
index 085469059bf..56e017fc1f9 100644
--- a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java
+++ b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java
@@ -246,6 +246,11 @@ public class MockTcpTransport extends TcpTransport
         if (closingTransport) {
             for (MockChannel channel : channels) {
                 if (channel.activeChannel != null) {
+                    /* We set the SO_LINGER timeout to 0 to ensure that when we shut down the node we don't have a gazillion connections
+                     * sitting in TIME_WAIT, so that resources are freed up quickly. This is really the only place where we close the
+                     * connection from the server side; otherwise the client (node) initiates the TCP closing sequence, which doesn't cause
+                     * these issues. Setting this by default from the beginning can have unexpected side-effects and should be avoided; our
+                     * protocol is designed so that clients close the connection, which is how it should be. */
                     channel.activeChannel.setSoLinger(true, 0);
                 }
             }
diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/NioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/NioTransport.java
index b22feb56976..38ec361546e 100644
--- a/test/framework/src/main/java/org/elasticsearch/transport/nio/NioTransport.java
+++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/NioTransport.java
@@ -102,6 +102,11 @@ public class NioTransport extends TcpTransport {
     protected void closeChannels(List<NioChannel> channels, boolean blocking, boolean closingTransport) throws IOException {
         if (closingTransport) {
             for (NioChannel channel : channels) {
+                /* We set the SO_LINGER timeout to 0 to ensure that when we shut down the node we don't have a gazillion connections
+                 * sitting in TIME_WAIT, so that resources are freed up quickly. This is really the only place where we close the
+                 * connection from the server side; otherwise the client (node) initiates the TCP closing sequence, which doesn't cause
+                 * these issues. Setting this by default from the beginning can have unexpected side-effects and should be avoided; our
+                 * protocol is designed so that clients close the connection, which is how it should be. */
                 channel.getRawChannel().setOption(StandardSocketOptions.SO_LINGER, 0);
            }
        }
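For background (not part of the patch above): the added comments rely on the fact that closing a socket with SO_LINGER enabled and a timeout of 0 aborts the connection, so the closing side does not enter TIME_WAIT. Below is a minimal standalone sketch of that behavior using plain java.net sockets; the class and variable names are hypothetical and only illustrate the socket option the three transports set, not code from the patch.

// Hypothetical standalone sketch, not part of the patch.
import java.net.ServerSocket;
import java.net.Socket;

public class SoLingerZeroSketch {
    public static void main(String[] args) throws Exception {
        try (ServerSocket server = new ServerSocket(0)) {             // bind to an ephemeral port
            try (Socket client = new Socket("127.0.0.1", server.getLocalPort());
                 Socket accepted = server.accept()) {
                // Linger enabled with a timeout of 0: close() aborts the connection (on most
                // platforms an RST is sent instead of a FIN), so this side skips TIME_WAIT.
                accepted.setSoLinger(true, 0);
                accepted.close();
            } // client is closed here as well
        }
    }
}

The three call sites in the patch set the same underlying option through their respective APIs: Netty's ChannelOption.SO_LINGER, the blocking Socket's setSoLinger(true, 0), and NIO's StandardSocketOptions.SO_LINGER.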