[[logging]]
=== Logging

include::{es-repo-dir}/tab-widgets/code.asciidoc[]

You can use {es}'s application logs to monitor your cluster and diagnose issues.
If you run {es} as a service, the default location of the logs varies based on
your platform and installation method:

include::{es-repo-dir}/tab-widgets/logging-widget.asciidoc[]

If you run {es} from the command line, {es} prints logs to the standard output
(`stdout`).

[discrete]
[[logging-configuration]]
=== Logging configuration
Elasticsearch uses https://logging.apache.org/log4j/2.x/[Log4j 2] for
logging. Log4j 2 can be configured using the `log4j2.properties`
file. Elasticsearch exposes three properties, `${sys:es.logs.base_path}`,
`${sys:es.logs.cluster_name}`, and `${sys:es.logs.node_name}`, that can be
referenced in the configuration file to determine the location of the log
files. The property `${sys:es.logs.base_path}` will resolve to the log directory,
`${sys:es.logs.cluster_name}` will resolve to the cluster name (used as the
prefix of log filenames in the default configuration), and
`${sys:es.logs.node_name}` will resolve to the node name (if the node name is
explicitly set).

For example, if your log directory (`path.logs`) is `/var/log/elasticsearch` and
your cluster is named `production`, then `${sys:es.logs.base_path}` will resolve
to `/var/log/elasticsearch` and
`${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log`
will resolve to `/var/log/elasticsearch/production.log`.

[source,properties]
--------------------------------------------------
######## Server JSON ############################
appender.rolling.type = RollingFile <1>
appender.rolling.name = rolling
appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.json <2>
appender.rolling.layout.type = ESJsonLayout <3>
appender.rolling.layout.type_name = server <4>
appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.json.gz <5>
appender.rolling.policies.type = Policies
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy <6>
appender.rolling.policies.time.interval = 1 <7>
appender.rolling.policies.time.modulate = true <8>
appender.rolling.policies.size.type = SizeBasedTriggeringPolicy <9>
appender.rolling.policies.size.size = 256MB <10>
appender.rolling.strategy.type = DefaultRolloverStrategy
appender.rolling.strategy.fileIndex = nomax
appender.rolling.strategy.action.type = Delete <11>
appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path}
appender.rolling.strategy.action.condition.type = IfFileName <12>
appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* <13>
appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize <14>
appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB <15>
################################################
--------------------------------------------------

<1> Configure the `RollingFile` appender
<2> Log to `/var/log/elasticsearch/production_server.json`
<3> Use JSON layout
<4> `type_name` is a flag that populates the `type` field in `ESJsonLayout`.
It can be used to distinguish different types of logs more easily when parsing them.
<5> Roll logs to `/var/log/elasticsearch/production-yyyy-MM-dd-i.json`; logs
will be compressed on each roll and `i` will be incremented
<6> Use a time-based roll policy
<7> Roll logs on a daily basis
<8> Align rolls on the day boundary (as opposed to rolling every twenty-four
hours)
<9> Use a size-based roll policy
<10> Roll logs after 256 MB
<11> Use a delete action when rolling logs
<12> Only delete logs matching a file pattern
<13> The pattern is to only delete the main logs
<14> Only delete if we have accumulated too many compressed logs
<15> The size condition on the compressed logs is 2 GB

[source,properties]
--------------------------------------------------
######## Server - old style pattern ###########
appender.rolling_old.type = RollingFile
appender.rolling_old.name = rolling_old
appender.rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log <1>
appender.rolling_old.layout.type = PatternLayout
appender.rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
appender.rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.old_log.gz
--------------------------------------------------

<1> The configuration for old-style pattern appenders. These logs will be saved in `*.log` files and, if archived, in `*.log.gz` files. Note that these should be considered deprecated and will be removed in the future.
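
With this `PatternLayout`, a rendered log line looks roughly like the following
(a sketch assembled from the pattern above and the fields of the example
deprecation entry shown later on this page; the exact `%marker` output may vary):

[source,txt]
--------------------------------------------------
[2019-08-30T12:07:07,126][WARN ][o.e.d.r.a.a.i.RestCreateIndexAction] [node-0] [types removal] Using include_type_name in create index requests is deprecated. The parameter will be removed in the next major version.
--------------------------------------------------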
NOTE: Log4j's configuration parsing gets confused by any extraneous whitespace;
if you copy and paste any Log4j settings on this page, or enter any Log4j
configuration in general, be sure to trim any leading and trailing whitespace.

Note that you can replace `.gz` with `.zip` in `appender.rolling.filePattern` to
compress the rolled logs using the zip format. If you remove the `.gz`
extension then logs will not be compressed as they are rolled.
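
For example, to produce zip-compressed rolled logs you would change only the
suffix of the default pattern shown earlier:

[source,properties]
--------------------------------------------------
appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.json.zip
--------------------------------------------------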
If you want to retain log files for a specified period of time, you can use a
rollover strategy with a delete action.

[source,properties]
--------------------------------------------------
appender.rolling.strategy.type = DefaultRolloverStrategy <1>
appender.rolling.strategy.action.type = Delete <2>
appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path} <3>
appender.rolling.strategy.action.condition.type = IfFileName <4>
appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* <5>
appender.rolling.strategy.action.condition.nested_condition.type = IfLastModified <6>
appender.rolling.strategy.action.condition.nested_condition.age = 7D <7>
--------------------------------------------------

<1> Configure the `DefaultRolloverStrategy`
<2> Configure the `Delete` action for handling rollovers
<3> The base path to the Elasticsearch logs
<4> The condition to apply when handling rollovers
<5> Delete files from the base path matching the glob
`${sys:es.logs.cluster_name}-*`; this is the glob that log files are rolled
to; this is needed to only delete the rolled Elasticsearch logs but not also
delete the deprecation and slow logs
<6> A nested condition to apply to files matching the glob
<7> Retain logs for seven days

Multiple configuration files can be loaded (in which case they will get merged)
as long as they are named `log4j2.properties` and have the Elasticsearch config
directory as an ancestor; this is useful for plugins that expose additional
loggers. The logger section contains the Java packages and their corresponding
log level. The appender section contains the destinations for the logs.
Extensive information on how to customize logging and all the supported
appenders can be found in the
https://logging.apache.org/log4j/2.x/manual/configuration.html[Log4j
documentation].
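
For example, a plugin might ship a `log4j2.properties` file that raises the log
level for its own package and routes those logs to a dedicated file. A minimal
sketch (the plugin package `org.example.myplugin` and the file names here are
hypothetical):

[source,properties]
--------------------------------------------------
# Hypothetical plugin logger: capture org.example.myplugin at debug level
# and send it to its own rolling file instead of the main server log.
logger.myplugin.name = org.example.myplugin
logger.myplugin.level = debug
logger.myplugin.appenderRef.myplugin_rolling.ref = myplugin_rolling
logger.myplugin.additivity = false

appender.myplugin_rolling.type = RollingFile
appender.myplugin_rolling.name = myplugin_rolling
appender.myplugin_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_myplugin.log
appender.myplugin_rolling.layout.type = PatternLayout
appender.myplugin_rolling.layout.pattern = [%d{ISO8601}][%-5p] %m%n
appender.myplugin_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_myplugin-%i.log.gz
appender.myplugin_rolling.policies.type = Policies
appender.myplugin_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.myplugin_rolling.policies.size.size = 64MB
--------------------------------------------------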
[discrete]
[[configuring-logging-levels]]
=== Configuring logging levels

Each Java package in the {es-repo}[{es} source code] has a related logger. For
example, the `org.elasticsearch.transport` package has
`logger.org.elasticsearch.transport` for logs related to communication between
nodes.

To get more or less verbose logs, use the <<cluster-update-settings,cluster
update settings API>> to change the related logger's log level. Each logger
accepts Log4j 2's built-in log levels, from least to most verbose: `OFF`,
`FATAL`, `ERROR`, `WARN`, `INFO`, `DEBUG`, and `TRACE`. The default log level is
`INFO`.

[source,console]
----
PUT /_cluster/settings
{
  "transient": {
    "logger.org.elasticsearch.transport": "TRACE"
  }
}
----
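
When you are done debugging, you can reset the logger to its default level by
setting the transient setting to `null`:

[source,console]
----
PUT /_cluster/settings
{
  "transient": {
    "logger.org.elasticsearch.transport": null
  }
}
----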
Other ways to change log levels include:

1. `elasticsearch.yml`:
+
--
[source,yaml]
----
logger.org.elasticsearch.transport: TRACE
----

This is most appropriate when debugging a problem on a single node.
--

2. `log4j2.properties`:
+
--
[source,properties]
----
logger.transport.name = org.elasticsearch.transport
logger.transport.level = trace
----

This is most appropriate when you already need to change your Log4j 2
configuration for other reasons. For example, you may want to send logs for a
particular logger to another file. However, these use cases are rare.
--

[discrete]
[[deprecation-logging]]
=== Deprecation logging

In addition to regular logging, Elasticsearch allows you to enable logging
of deprecated actions. For example, this allows you to determine early whether
you will need to migrate certain functionality in the future. By default,
deprecation logging is enabled at the `WARN` level, the level at which all
deprecation log messages will be emitted.

[source,properties]
--------------------------------------------------
logger.deprecation.level = warn
--------------------------------------------------

This will create a daily rolling deprecation log file in your log directory.
Check this file regularly, especially when you intend to upgrade to a new
major version.

The default logging configuration sets the roll policy for the deprecation
logs to roll and compress after 1 GB, and to preserve a maximum of five log
files (four rolled logs, and the active log).
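
In `log4j2.properties` terms, such a policy looks roughly like the following
sketch, reusing the rolling-appender properties from the server log example
above (the appender and file names may differ from the defaults shipped with
your version):

[source,properties]
--------------------------------------------------
# Roll and compress the deprecation log after 1 GB,
# keeping at most four rolled files plus the active one.
appender.deprecation_rolling.type = RollingFile
appender.deprecation_rolling.name = deprecation_rolling
appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.json
appender.deprecation_rolling.layout.type = ESJsonLayout
appender.deprecation_rolling.layout.type_name = deprecation
appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.json.gz
appender.deprecation_rolling.policies.type = Policies
appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.deprecation_rolling.policies.size.size = 1GB
appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
appender.deprecation_rolling.strategy.max = 4
--------------------------------------------------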
You can disable deprecation logging in the `config/log4j2.properties` file by
setting the deprecation log level to `error`, like this:

[source,properties]
--------------------------------------------------
logger.deprecation.name = org.elasticsearch.deprecation
logger.deprecation.level = error
--------------------------------------------------

You can identify what is triggering deprecated functionality if `X-Opaque-Id`
was used as an HTTP header. The value of this header, such as a user ID, is
included in the `x-opaque-id` field in deprecation JSON logs.
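
For example, a request tagged with this header (a sketch using `curl`; the node
address and index name are assumptions, and `include_type_name` is the
deprecated parameter from the log entry below):

[source,sh]
--------------------------------------------------
# Tag the request; the header value is echoed back in the deprecation log.
curl -H "X-Opaque-Id: MY_USER_ID" \
  -X PUT "localhost:9200/my-index?include_type_name=true"
--------------------------------------------------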
[source,js]
---------------------------
{
  "type": "deprecation",
  "timestamp": "2019-08-30T12:07:07,126+02:00",
  "level": "WARN",
  "component": "o.e.d.r.a.a.i.RestCreateIndexAction",
  "cluster.name": "distribution_run",
  "node.name": "node-0",
  "message": "[types removal] Using include_type_name in create index requests is deprecated. The parameter will be removed in the next major version.",
  "x-opaque-id": "MY_USER_ID",
  "cluster.uuid": "Aq-c-PAeQiK3tfBYtig9Bw",
  "node.id": "D7fUYfnfTLa2D7y-xw6tZg"
}
---------------------------
// NOTCONSOLE

[discrete]
[[json-logging]]
=== JSON log format

To make parsing Elasticsearch logs easier, logs are now printed in a JSON format.
This is configured by a Log4j layout property `appender.rolling.layout.type = ESJsonLayout`.
This layout requires a `type_name` attribute to be set, which is used to distinguish
log streams when parsing.

[source,properties]
--------------------------------------------------
appender.rolling.layout.type = ESJsonLayout
appender.rolling.layout.type_name = server
--------------------------------------------------

:es-json-layout-java-doc: {elasticsearch-javadoc}/org/elasticsearch/common/logging/ESJsonLayout.html

Each line contains a single JSON document with the properties configured in `ESJsonLayout`.
See this class's {es-json-layout-java-doc}[javadoc] for more details.
However, if a JSON document contains an exception, it will be printed over multiple lines.
The first line will contain regular properties and subsequent lines will contain the
stacktrace formatted as a JSON array.
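
Because each regular entry is a single line of JSON, the logs work well with
line-oriented tools. As an illustration, here is a sketch of a `jq` one-liner
that pulls the warnings out of the server log from the earlier example
(assuming `jq` is installed and the default JSON appender is in use):

[source,sh]
--------------------------------------------------
# -R reads raw lines; fromjson?/objects skip lines that are not
# standalone JSON objects (such as multi-line stack trace output).
jq -R 'fromjson? | objects | select(.level == "WARN") | "\(.timestamp) \(.message)"' \
  /var/log/elasticsearch/production_server.json
--------------------------------------------------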
NOTE: You can still use your own custom layout. To do that, replace the
`appender.rolling.layout.type` line with a different layout. See the sample below:

[source,properties]
--------------------------------------------------
appender.rolling.type = RollingFile
appender.rolling.name = rolling
appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log
appender.rolling.layout.type = PatternLayout
appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n
appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz
--------------------------------------------------