Change file descriptor limit to 65535 (#37537)

Some systems default to a nofile ulimit of 65535. To reduce the pain of
deploying Elasticsearch to such systems, this commit lowers the required
limit from 65536 to 65535.
Jason Tedor authored 2019-01-16 17:19:12 -05:00, committed by GitHub
commit 18a3e48a4a (parent 655103de58)
12 changed files with 15 additions and 15 deletions
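
The commit message above refers to systems whose default nofile ulimit is 65535; what a given host actually allows can be checked before deploying with standard tools (a minimal sketch):

[source,sh]
--------------------------------
# Soft and hard limits on open file descriptors for the current session
ulimit -Sn
ulimit -Hn
# Kernel-wide ceiling on open files
cat /proc/sys/fs/file-max
--------------------------------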

@@ -36,7 +36,7 @@ ES_STARTUP_SLEEP_TIME=5
 # Specifies the maximum file descriptor number that can be opened by this process
 # When using Systemd, this setting is ignored and the LimitNOFILE defined in
 # /usr/lib/systemd/system/elasticsearch.service takes precedence
-#MAX_OPEN_FILES=65536
+#MAX_OPEN_FILES=65535
 # The maximum number of bytes of memory that may be locked into RAM
 # Set to "unlimited" if you use the 'bootstrap.memory_lock: true' option
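
As the comment in this hunk notes, the value that actually applies under systemd comes from the unit, not this file; it can be confirmed with `systemctl` (a small sketch, assuming the stock `elasticsearch.service` unit name):

[source,sh]
--------------------------------
# Show the LimitNOFILE that systemd will apply to the service
systemctl show elasticsearch.service --property=LimitNOFILE
--------------------------------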

@@ -29,7 +29,7 @@ StandardOutput=journal
 StandardError=inherit
 # Specifies the maximum file descriptor number that can be opened by this process
-LimitNOFILE=65536
+LimitNOFILE=65535
 # Specifies the maximum number of processes
 LimitNPROC=4096
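
Sites that want a higher ceiling than the packaged unit provides can raise it without editing the unit itself; a sketch of a systemd drop-in override (the drop-in path and the value 131072 are only examples):

[source,sh]
--------------------------------
# Create a drop-in that overrides LimitNOFILE for elasticsearch.service
sudo mkdir -p /etc/systemd/system/elasticsearch.service.d
sudo tee /etc/systemd/system/elasticsearch.service.d/override.conf <<'EOF'
[Service]
LimitNOFILE=131072
EOF
sudo systemctl daemon-reload
sudo systemctl restart elasticsearch.service
--------------------------------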

@@ -39,7 +39,7 @@ ES_HOME=/usr/share/$NAME
 #ES_JAVA_OPTS=
 # Maximum number of open files
-MAX_OPEN_FILES=65536
+MAX_OPEN_FILES=65535
 # Maximum amount of locked memory
 #MAX_LOCKED_MEMORY=

@@ -33,7 +33,7 @@ fi
 # Sets the default values for elasticsearch variables used in this script
 ES_HOME="/usr/share/elasticsearch"
-MAX_OPEN_FILES=65536
+MAX_OPEN_FILES=65535
 MAX_MAP_COUNT=262144
 ES_PATH_CONF="${path.conf}"

@@ -248,7 +248,7 @@ If everything goes well with installation, you should see a bunch of messages th
 [2018-09-13T12:20:05,006][INFO ][o.e.n.Node ] [localhost.localdomain] initialized
 [2018-09-13T12:20:05,007][INFO ][o.e.n.Node ] [localhost.localdomain] starting ...
 [2018-09-13T12:20:05,202][INFO ][o.e.t.TransportService ] [localhost.localdomain] publish_address {127.0.0.1:9300}, bound_addresses {[::1]:9300}, {127.0.0.1:9300}
-[2018-09-13T12:20:05,221][WARN ][o.e.b.BootstrapChecks ] [localhost.localdomain] max file descriptors [4096] for elasticsearch process is too low, increase to at least [65536]
+[2018-09-13T12:20:05,221][WARN ][o.e.b.BootstrapChecks ] [localhost.localdomain] max file descriptors [4096] for elasticsearch process is too low, increase to at least [65535]
 [2018-09-13T12:20:05,221][WARN ][o.e.b.BootstrapChecks ] [localhost.localdomain] max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]
 [2018-09-13T12:20:08,355][INFO ][o.e.c.s.MasterService ] [localhost.localdomain] zen-disco-elected-as-master ([0] nodes joined)[, ], reason: master node changed {previous [], current [{localhost.localdomain}{B0aEHNagTiWx7SYj-l4NTw}{hzsQz6CVQMCTpMCVLM4IHg}{127.0.0.1}{127.0.0.1:9300}{testattr=test}]}
 [2018-09-13T12:20:08,360][INFO ][o.e.c.s.ClusterApplierService] [localhost.localdomain] master node changed {previous [], current [{localhost.localdomain}{B0aEHNagTiWx7SYj-l4NTw}{hzsQz6CVQMCTpMCVLM4IHg}{127.0.0.1}{127.0.0.1:9300}{testattr=test}]}, reason: apply cluster state (from master [master {localhost.localdomain}{B0aEHNagTiWx7SYj-l4NTw}{hzsQz6CVQMCTpMCVLM4IHg}{127.0.0.1}{127.0.0.1:9300}{testattr=test} committed version [1] source [zen-disco-elected-as-master ([0] nodes joined)[, ]]])

@@ -338,7 +338,7 @@ needed, adjust them in the Daemon, or override them per container, for example
 using `docker run`:
 +
 --
---ulimit nofile=65536:65536
+--ulimit nofile=65535:65535
 NOTE: One way of checking the Docker daemon defaults for the aforementioned
 ulimits is by running:
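
The command that note refers to falls outside the quoted hunk; one way to inspect the daemon defaults is to start a throwaway container and print its ulimits (a sketch; the `centos:7` image is only an example):

[source,sh]
--------------------------------
# Print the hard and soft nofile and nproc limits a fresh container receives
docker run --rm centos:7 /bin/bash -c 'ulimit -Hn && ulimit -Sn && ulimit -Hu && ulimit -Su'
--------------------------------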

@@ -5,7 +5,7 @@
 `MAX_OPEN_FILES`::
-Maximum number of open files, defaults to `65536`.
+Maximum number of open files, defaults to `65535`.
 `MAX_LOCKED_MEMORY`::

@@ -25,7 +25,7 @@ open file handles (`ulimit -n`) to 65,536, you can do the following:
 [source,sh]
 --------------------------------
 sudo su <1>
-ulimit -n 65536 <2>
+ulimit -n 65535 <2>
 su elasticsearch <3>
 --------------------------------
 <1> Become `root`.
@@ -46,7 +46,7 @@ the `limits.conf` file:
 [source,sh]
 --------------------------------
-elasticsearch - nofile 65536
+elasticsearch - nofile 65535
 --------------------------------
 This change will only take effect the next time the `elasticsearch` user opens
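
To confirm that the `limits.conf` entry is picked up, open a fresh session as the service account and print the limit (a minimal check, assuming the user is named `elasticsearch` and limits are applied through PAM):

[source,sh]
--------------------------------
sudo su                          # become root
su -s /bin/bash elasticsearch    # start a new session as the service account
ulimit -n                        # should now print 65535 (or higher)
--------------------------------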

@@ -12,15 +12,15 @@ file descriptors can be disastrous and will most probably lead to data loss.
 Make sure to increase the limit on the number of open files descriptors for
 the user running Elasticsearch to 65,536 or higher.
-For the `.zip` and `.tar.gz` packages, set <<ulimit,`ulimit -n 65536`>> as
-root before starting Elasticsearch, or set `nofile` to `65536` in
+For the `.zip` and `.tar.gz` packages, set <<ulimit,`ulimit -n 65535`>> as
+root before starting Elasticsearch, or set `nofile` to `65535` in
 <<limits.conf,`/etc/security/limits.conf`>>.
 On macOS, you must also pass the JVM option `-XX:-MaxFDLimit`
 to Elasticsearch in order for it to make use of the higher file descriptor limit.
 RPM and Debian packages already default the maximum number of file
-descriptors to 65536 and do not require further configuration.
+descriptors to 65535 and do not require further configuration.
 You can check the `max_file_descriptors` configured for each node
 using the <<cluster-nodes-stats>> API, with:
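
The request itself is not part of the quoted hunk; an equivalent call with curl looks roughly like this (host and port are assumptions):

[source,sh]
--------------------------------
# Report max_file_descriptors for every node in the cluster
curl -s 'http://localhost:9200/_nodes/stats/process?filter_path=**.max_file_descriptors&pretty'
--------------------------------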

@@ -234,7 +234,7 @@ setup() {
 local max_processes=$(cat /proc/$pid/limits | grep "Max processes" | awk '{ print $3 }')
 [ "$max_processes" == "4096" ]
 local max_open_files=$(cat /proc/$pid/limits | grep "Max open files" | awk '{ print $4 }')
-[ "$max_open_files" == "65536" ]
+[ "$max_open_files" == "65535" ]
 local max_address_space=$(cat /proc/$pid/limits | grep "Max address space" | awk '{ print $4 }')
 [ "$max_address_space" == "unlimited" ]
 systemctl stop elasticsearch.service
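
The same check the packaging test performs can be done by hand against a running node; a small sketch (the `pgrep` pattern is just one way to find the PID):

[source,sh]
--------------------------------
# Inspect the open-files limit applied to the running Elasticsearch process
pid=$(pgrep -f org.elasticsearch.bootstrap.Elasticsearch | head -n1)
grep 'Max open files' /proc/$pid/limits
--------------------------------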

@@ -267,7 +267,7 @@ final class BootstrapChecks {
 private final int limit;
 FileDescriptorCheck() {
-this(1 << 16);
+this(65535);
 }
 protected FileDescriptorCheck(final int limit) {

@@ -188,7 +188,7 @@ public class BootstrapChecksTests extends AbstractBootstrapCheckTestCase {
 public void testFileDescriptorLimits() throws NodeValidationException {
 final boolean osX = randomBoolean(); // simulates OS X versus non-OS X
-final int limit = osX ? 10240 : 1 << 16;
+final int limit = osX ? 10240 : 65535;
 final AtomicLong maxFileDescriptorCount = new AtomicLong(randomIntBetween(1, limit - 1));
 final BootstrapChecks.FileDescriptorCheck check;
 if (osX) {