Add JSON logging support to Elasticsearch (#36833)
In order to support the JSON log format, a custom pattern layout is used; its configuration is enclosed in ESJsonLayout. Users are free to use their own patterns, but if smooth Beats integration is needed, they should use ESJsonLayout. EvilLoggerTests are left intact to make sure users' custom log patterns keep working.

To populate the additional fields node.id and cluster.uuid, which are not available at start time, a cluster state update has to be received and the values passed to a log4j pattern converter. A ClusterStateObserver.Listener is used to receive only one ClusterStateUpdate. Once the update is received, the nodeId and clusterUuid are set in a static field in NodeAndClusterIdConverter.

The following fields are expected in JSON log lines: type, timestamp, level, component, cluster.name, node.name, node.id, cluster.uuid, message, stacktrace. See ESJsonLayout.java for more details and field descriptions.

The Docker log4j2 configuration is now almost the same as the one used for the ES binary. The only difference is that Docker uses console appenders, whereas ES uses file appenders.

relates: #32850
This commit is contained in:
parent 6ed35fbb94
commit 891320f5ac
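As a minimal sketch of the wiring the message describes: only ClusterStateObserver.Listener and NodeAndClusterIdConverter are named in the commit, so the helper class, method names, and exact setter signature below are illustrative assumptions, not the literal commit code.

    import org.apache.logging.log4j.Logger;
    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.ClusterStateObserver;
    import org.elasticsearch.cluster.service.ClusterService;
    import org.elasticsearch.common.logging.NodeAndClusterIdConverter;
    import org.elasticsearch.common.unit.TimeValue;
    import org.elasticsearch.common.util.concurrent.ThreadContext;

    // Illustrative sketch (names other than the two classes above are assumptions).
    class NodeAndClusterIdListenerSketch {
        static void awaitIds(ClusterService clusterService, ThreadContext threadContext, Logger logger) {
            ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadContext);
            // waitForNextChange fires the listener once, on the first cluster state update
            observer.waitForNextChange(new ClusterStateObserver.Listener() {
                @Override
                public void onNewClusterState(ClusterState state) {
                    // node.id and cluster.uuid are unknown at startup; once the first
                    // state arrives, hand them to the log4j pattern converter, which
                    // keeps them in a static field for all subsequent log lines, e.g.:
                    // {"type":"server","timestamp":"...","level":"INFO","component":"o.e.n.Node",
                    //  "cluster.name":"...","node.name":"node-0","cluster.uuid":"...","node.id":"...","message":"started"}
                    NodeAndClusterIdConverter.setNodeIdAndClusterId(
                            state.nodes().getLocalNodeId(), state.metaData().clusterUUID());
                }
                @Override
                public void onClusterServiceClose() {}
                @Override
                public void onTimeout(TimeValue timeout) {}
            });
        }
    }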
@@ -27,7 +27,7 @@ integTestRunner {
      */
     if (System.getProperty("tests.rest.cluster") == null) {
         systemProperty 'tests.logfile',
-                "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }.log"
+                "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.json"
     } else {
         systemProperty 'tests.logfile', '--external--'
     }
@@ -19,11 +19,11 @@
 
 package org.elasticsearch.test.rest;
 
-import org.elasticsearch.common.logging.NodeNameInLogsIntegTestCase;
+import org.elasticsearch.common.logging.JsonLogsIntegTestCase;
 import org.hamcrest.Matcher;
 
-import java.io.IOException;
 import java.io.BufferedReader;
+import java.io.IOException;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -32,7 +32,7 @@ import java.security.PrivilegedAction;
 
 import static org.hamcrest.Matchers.is;
 
-public class NodeNameInLogsIT extends NodeNameInLogsIntegTestCase {
+public class JsonLogsFormatAndParseIT extends JsonLogsIntegTestCase {
     @Override
     protected Matcher<String> nodeNameMatcher() {
         return is("node-0");
@@ -41,7 +41,7 @@ public class NodeNameInLogsIT extends NodeNameInLogsIntegTestCase {
     @Override
     protected BufferedReader openReader(Path logFile) {
         assumeFalse("Skipping test because it is being run against an external cluster.",
                 logFile.getFileName().toString().equals("--external--"));
         return AccessController.doPrivileged((PrivilegedAction<BufferedReader>) () -> {
             try {
                 return Files.newBufferedReader(logFile, StandardCharsets.UTF_8);
@@ -1,9 +1,43 @@
 status = error
 
-appender.console.type = Console
-appender.console.name = console
-appender.console.layout.type = PatternLayout
-appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
+# log action execution errors for easier debugging
+logger.action.name = org.elasticsearch.action
+logger.action.level = debug
+
+appender.rolling.type = Console
+appender.rolling.name = rolling
+appender.rolling.layout.type = ESJsonLayout
+appender.rolling.layout.type_name = server
 
 rootLogger.level = info
-rootLogger.appenderRef.console.ref = console
+rootLogger.appenderRef.rolling.ref = rolling
+
+appender.deprecation_rolling.type = Console
+appender.deprecation_rolling.name = deprecation_rolling
+appender.deprecation_rolling.layout.type = ESJsonLayout
+appender.deprecation_rolling.layout.type_name = deprecation
+
+logger.deprecation.name = org.elasticsearch.deprecation
+logger.deprecation.level = warn
+logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
+logger.deprecation.additivity = false
+
+appender.index_search_slowlog_rolling.type = Console
+appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
+appender.index_search_slowlog_rolling.layout.type = ESJsonLayout
+appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog
+
+logger.index_search_slowlog_rolling.name = index.search.slowlog
+logger.index_search_slowlog_rolling.level = trace
+logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
+logger.index_search_slowlog_rolling.additivity = false
+
+appender.index_indexing_slowlog_rolling.type = Console
+appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
+appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout
+appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog
+
+logger.index_indexing_slowlog.name = index.indexing.slowlog.index
+logger.index_indexing_slowlog.level = trace
+logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
+logger.index_indexing_slowlog.additivity = false
@@ -9,12 +9,14 @@ appender.console.name = console
 appender.console.layout.type = PatternLayout
 appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
 
+######## Server JSON ############################
 appender.rolling.type = RollingFile
 appender.rolling.name = rolling
-appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
-appender.rolling.layout.type = PatternLayout
-appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n
-appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz
+appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.json
+appender.rolling.layout.type = ESJsonLayout
+appender.rolling.layout.type_name = server
+
+appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.json.gz
 appender.rolling.policies.type = Policies
 appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
 appender.rolling.policies.time.interval = 1
@@ -29,58 +31,144 @@ appender.rolling.strategy.action.condition.type = IfFileName
 appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
 appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
 appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB
+################################################
+######## Server - old style pattern ###########
+appender.rolling_old.type = RollingFile
+appender.rolling_old.name = rolling_old
+appender.rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
+appender.rolling_old.layout.type = PatternLayout
+appender.rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
+
+appender.rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz
+appender.rolling_old.policies.type = Policies
+appender.rolling_old.policies.time.type = TimeBasedTriggeringPolicy
+appender.rolling_old.policies.time.interval = 1
+appender.rolling_old.policies.time.modulate = true
+appender.rolling_old.policies.size.type = SizeBasedTriggeringPolicy
+appender.rolling_old.policies.size.size = 128MB
+appender.rolling_old.strategy.type = DefaultRolloverStrategy
+appender.rolling_old.strategy.fileIndex = nomax
+appender.rolling_old.strategy.action.type = Delete
+appender.rolling_old.strategy.action.basepath = ${sys:es.logs.base_path}
+appender.rolling_old.strategy.action.condition.type = IfFileName
+appender.rolling_old.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
+appender.rolling_old.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
+appender.rolling_old.strategy.action.condition.nested_condition.exceeds = 2GB
+################################################
 
 rootLogger.level = info
 rootLogger.appenderRef.console.ref = console
 rootLogger.appenderRef.rolling.ref = rolling
+rootLogger.appenderRef.rolling_old.ref = rolling_old
 
+######## Deprecation JSON #######################
 appender.deprecation_rolling.type = RollingFile
 appender.deprecation_rolling.name = deprecation_rolling
-appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log
-appender.deprecation_rolling.layout.type = PatternLayout
-appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n
-appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.log.gz
+appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.json
+appender.deprecation_rolling.layout.type = ESJsonLayout
+appender.deprecation_rolling.layout.type_name = deprecation
+
+appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.json.gz
 appender.deprecation_rolling.policies.type = Policies
 appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
 appender.deprecation_rolling.policies.size.size = 1GB
 appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
 appender.deprecation_rolling.strategy.max = 4
+#################################################
+######## Deprecation - old style pattern #######
+appender.deprecation_rolling_old.type = RollingFile
+appender.deprecation_rolling_old.name = deprecation_rolling_old
+appender.deprecation_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log
+appender.deprecation_rolling_old.layout.type = PatternLayout
+appender.deprecation_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
+
+appender.deprecation_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
+  _deprecation-%i.log.gz
+appender.deprecation_rolling_old.policies.type = Policies
+appender.deprecation_rolling_old.policies.size.type = SizeBasedTriggeringPolicy
+appender.deprecation_rolling_old.policies.size.size = 1GB
+appender.deprecation_rolling_old.strategy.type = DefaultRolloverStrategy
+appender.deprecation_rolling_old.strategy.max = 4
+#################################################
 logger.deprecation.name = org.elasticsearch.deprecation
 logger.deprecation.level = warn
 logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
+logger.deprecation.appenderRef.deprecation_rolling_old.ref = deprecation_rolling_old
 logger.deprecation.additivity = false
 
+######## Search slowlog JSON ####################
 appender.index_search_slowlog_rolling.type = RollingFile
 appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
-appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.log
-appender.index_search_slowlog_rolling.layout.type = PatternLayout
-appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%node_name]%marker %.-10000m%n
-appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%i.log.gz
+appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\
+  .cluster_name}_index_search_slowlog.json
+appender.index_search_slowlog_rolling.layout.type = ESJsonLayout
+appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog
+
+appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\
+  .cluster_name}_index_search_slowlog-%i.json.gz
 appender.index_search_slowlog_rolling.policies.type = Policies
 appender.index_search_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy
 appender.index_search_slowlog_rolling.policies.size.size = 1GB
 appender.index_search_slowlog_rolling.strategy.type = DefaultRolloverStrategy
 appender.index_search_slowlog_rolling.strategy.max = 4
+#################################################
+######## Search slowlog - old style pattern ####
+appender.index_search_slowlog_rolling_old.type = RollingFile
+appender.index_search_slowlog_rolling_old.name = index_search_slowlog_rolling_old
+appender.index_search_slowlog_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
+  _index_search_slowlog.log
+appender.index_search_slowlog_rolling_old.layout.type = PatternLayout
+appender.index_search_slowlog_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
+
+appender.index_search_slowlog_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
+  _index_search_slowlog-%i.log.gz
+appender.index_search_slowlog_rolling_old.policies.type = Policies
+appender.index_search_slowlog_rolling_old.policies.size.type = SizeBasedTriggeringPolicy
+appender.index_search_slowlog_rolling_old.policies.size.size = 1GB
+appender.index_search_slowlog_rolling_old.strategy.type = DefaultRolloverStrategy
+appender.index_search_slowlog_rolling_old.strategy.max = 4
+#################################################
 logger.index_search_slowlog_rolling.name = index.search.slowlog
 logger.index_search_slowlog_rolling.level = trace
 logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
+logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling_old.ref = index_search_slowlog_rolling_old
 logger.index_search_slowlog_rolling.additivity = false
 
+######## Indexing slowlog JSON ##################
 appender.index_indexing_slowlog_rolling.type = RollingFile
 appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
-appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.log
-appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
-appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%node_name]%marker %.-10000m%n
-appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%i.log.gz
+appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
+  _index_indexing_slowlog.json
+appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout
+appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog
+
+appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
+  _index_indexing_slowlog-%i.json.gz
 appender.index_indexing_slowlog_rolling.policies.type = Policies
 appender.index_indexing_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy
 appender.index_indexing_slowlog_rolling.policies.size.size = 1GB
 appender.index_indexing_slowlog_rolling.strategy.type = DefaultRolloverStrategy
 appender.index_indexing_slowlog_rolling.strategy.max = 4
+#################################################
+######## Indexing slowlog - old style pattern ##
+appender.index_indexing_slowlog_rolling_old.type = RollingFile
+appender.index_indexing_slowlog_rolling_old.name = index_indexing_slowlog_rolling_old
+appender.index_indexing_slowlog_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
+  _index_indexing_slowlog.log
+appender.index_indexing_slowlog_rolling_old.layout.type = PatternLayout
+appender.index_indexing_slowlog_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
+
+appender.index_indexing_slowlog_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
+  _index_indexing_slowlog-%i.log.gz
+appender.index_indexing_slowlog_rolling_old.policies.type = Policies
+appender.index_indexing_slowlog_rolling_old.policies.size.type = SizeBasedTriggeringPolicy
+appender.index_indexing_slowlog_rolling_old.policies.size.size = 1GB
+appender.index_indexing_slowlog_rolling_old.strategy.type = DefaultRolloverStrategy
+appender.index_indexing_slowlog_rolling_old.strategy.max = 4
+#################################################
 
 logger.index_indexing_slowlog.name = index.indexing.slowlog.index
 logger.index_indexing_slowlog.level = trace
 logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
+logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling_old.ref = index_indexing_slowlog_rolling_old
 logger.index_indexing_slowlog.additivity = false
@@ -25,6 +25,7 @@ See also <<release-highlights>> and <<es-release-notes>>.
 * <<breaking_70_snapshotstats_changes>>
 * <<breaking_70_restclient_changes>>
 * <<breaking_70_low_level_restclient_changes>>
+* <<breaking_70_logging_changes>>
 
 [float]
 === Indices created before 7.0
@@ -58,3 +59,4 @@ include::migrate_7_0/scripting.asciidoc[]
 include::migrate_7_0/snapshotstats.asciidoc[]
 include::migrate_7_0/restclient.asciidoc[]
 include::migrate_7_0/low_level_restclient.asciidoc[]
+include::migrate_7_0/logging.asciidoc[]
@@ -0,0 +1,33 @@
+[float]
+[[breaking_70_logging_changes]]
+=== Logging changes
+
+[float]
+==== New JSON format log files in `log` directory
+
+Elasticsearch will now produce additional log files in JSON format, stored in files with a `*.json` suffix.
+The following files can now be expected in the log directory:
+* ${cluster_name}_server.json
+* ${cluster_name}_deprecation.json
+* ${cluster_name}_index_search_slowlog.json
+* ${cluster_name}_index_indexing_slowlog.json
+* ${cluster_name}.log
+* ${cluster_name}_deprecation.log
+* ${cluster_name}_index_search_slowlog.log
+* ${cluster_name}_index_indexing_slowlog.log
+* ${cluster_name}_audit.json
+* gc.log
+
+Note: You can configure which of these files are written by editing `log4j2.properties`.
+
+[float]
+==== Log files ending with `*.log` deprecated
+Log files with the `.log` file extension, which use the old pattern layout format,
+are now considered deprecated; the newly added JSON log file format with
+the `.json` file extension should be used instead.
+Note: GC logs, which are written to the file `gc.log`, will not be changed.
+
+[float]
+==== Docker output in JSON format
+
+All Docker console logs are now in JSON format. You can distinguish log streams with the `type` field.
@@ -20,43 +20,62 @@ will resolve to `/var/log/elasticsearch/production.log`.
 
 [source,properties]
 --------------------------------------------------
+######## Server JSON ############################
 appender.rolling.type = RollingFile <1>
 appender.rolling.name = rolling
-appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log <2>
-appender.rolling.layout.type = PatternLayout
-appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n
-appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz <3>
+appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.json <2>
+appender.rolling.layout.type = ESJsonLayout <3>
+appender.rolling.layout.type_name = server <4>
+appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.json.gz <5>
 appender.rolling.policies.type = Policies
-appender.rolling.policies.time.type = TimeBasedTriggeringPolicy <4>
-appender.rolling.policies.time.interval = 1 <5>
-appender.rolling.policies.time.modulate = true <6>
-appender.rolling.policies.size.type = SizeBasedTriggeringPolicy <7>
-appender.rolling.policies.size.size = 256MB <8>
+appender.rolling.policies.time.type = TimeBasedTriggeringPolicy <6>
+appender.rolling.policies.time.interval = 1 <7>
+appender.rolling.policies.time.modulate = true <8>
+appender.rolling.policies.size.type = SizeBasedTriggeringPolicy <9>
+appender.rolling.policies.size.size = 256MB <10>
 appender.rolling.strategy.type = DefaultRolloverStrategy
 appender.rolling.strategy.fileIndex = nomax
-appender.rolling.strategy.action.type = Delete <9>
+appender.rolling.strategy.action.type = Delete <11>
 appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path}
-appender.rolling.strategy.action.condition.type = IfFileName <10>
-appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* <11>
-appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize <12>
-appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB <13>
+appender.rolling.strategy.action.condition.type = IfFileName <12>
+appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* <13>
+appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize <14>
+appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB <15>
+################################################
 --------------------------------------------------
 
 <1> Configure the `RollingFile` appender
-<2> Log to `/var/log/elasticsearch/production.log`
-<3> Roll logs to `/var/log/elasticsearch/production-yyyy-MM-dd-i.log`; logs
+<2> Log to `/var/log/elasticsearch/production.json`
+<3> Use JSON layout.
+<4> `type_name` is a flag populating the `type` field in an `ESJsonLayout`.
+It can be used to distinguish different types of logs more easily when parsing them.
+<5> Roll logs to `/var/log/elasticsearch/production-yyyy-MM-dd-i.json`; logs
 will be compressed on each roll and `i` will be incremented
-<4> Use a time-based roll policy
-<5> Roll logs on a daily basis
-<6> Align rolls on the day boundary (as opposed to rolling every twenty-four
+<6> Use a time-based roll policy
+<7> Roll logs on a daily basis
+<8> Align rolls on the day boundary (as opposed to rolling every twenty-four
 hours)
-<7> Using a size-based roll policy
-<8> Roll logs after 256 MB
-<9> Use a delete action when rolling logs
-<10> Only delete logs matching a file pattern
-<11> The pattern is to only delete the main logs
-<12> Only delete if we have accumulated too many compressed logs
-<13> The size condition on the compressed logs is 2 GB
+<9> Using a size-based roll policy
+<10> Roll logs after 256 MB
+<11> Use a delete action when rolling logs
+<12> Only delete logs matching a file pattern
+<13> The pattern is to only delete the main logs
+<14> Only delete if we have accumulated too many compressed logs
+<15> The size condition on the compressed logs is 2 GB
+
+[source,properties]
+--------------------------------------------------
+######## Server - old style pattern ###########
+appender.rolling_old.type = RollingFile
+appender.rolling_old.name = rolling_old
+appender.rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log <1>
+appender.rolling_old.layout.type = PatternLayout
+appender.rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
+appender.rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.old_log.gz
+
+--------------------------------------------------
+<1> The configuration for `old style` pattern appenders. These logs will be saved in `*.log` files and, if archived, in
+`*.log.gz` files. Note that these should be considered deprecated and will be removed in the future.
+
 NOTE: Log4j's configuration parsing gets confused by any extraneous whitespace;
 if you copy and paste any Log4j settings on this page, or enter any Log4j
@@ -194,3 +213,38 @@ files (four rolled logs, and the active log).
 
 You can disable it in the `config/log4j2.properties` file by setting the deprecation
 log level to `error`.
+
+[float]
+[[json-logging]]
+=== JSON log format
+
+To make parsing Elasticsearch logs easier, logs are now printed in a JSON format.
+This is configured by a Log4J layout property `appender.rolling.layout.type = ESJsonLayout`.
+This layout requires a `type_name` attribute to be set, which is used to distinguish
+log streams when parsing.
+[source,properties]
+--------------------------------------------------
+appender.rolling.layout.type = ESJsonLayout
+appender.rolling.layout.type_name = server
+--------------------------------------------------
+:es-json-layout-java-doc: {elasticsearch-javadoc}/org/elasticsearch/common/logging/ESJsonLayout.html
+
+Each line contains a single JSON document with the properties configured in `ESJsonLayout`.
+See this class's {es-json-layout-java-doc}[javadoc] for more details.
+However, if a JSON document contains an exception, it will be printed over multiple lines.
+The first line will contain regular properties and subsequent lines will contain the
+stacktrace formatted as a JSON array.
+
+NOTE: You can still use your own custom layout. To do that, replace the line
+`appender.rolling.layout.type` with a different layout. See the sample below:
+[source,properties]
+--------------------------------------------------
+appender.rolling.type = RollingFile
+appender.rolling.name = rolling
+appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log
+appender.rolling.layout.type = PatternLayout
+appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n
+appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz
+--------------------------------------------------
@@ -28,7 +28,7 @@ integTestRunner {
     systemProperty 'tests.security.manager', 'false'
     systemProperty 'tests.system_call_filter', 'false'
     systemProperty 'pidfile', "${-> integTest.getNodes().get(0).pidFile}"
-    systemProperty 'log', "${-> integTest.getNodes().get(0).homeDir}/logs/${-> integTest.getNodes().get(0).clusterName}.log"
+    systemProperty 'log', "${-> integTest.getNodes().get(0).homeDir}/logs/${-> integTest.getNodes().get(0).clusterName}_server.json"
    systemProperty 'runtime.java.home', "${project.runtimeJavaHome}"
 }
@@ -21,10 +21,14 @@ package org.elasticsearch.qa.die_with_dignity;
 
 import org.apache.http.ConnectionClosedException;
 import org.apache.lucene.util.Constants;
+import org.elasticsearch.cli.Terminal;
 import org.elasticsearch.client.Request;
 import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.logging.JsonLogLine;
+import org.elasticsearch.common.logging.JsonLogsStream;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.hamcrest.Matcher;
+import org.hamcrest.Matchers;
 
 import java.io.BufferedReader;
 import java.io.IOException;
@@ -34,10 +38,12 @@ import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.Iterator;
 import java.util.List;
+import java.util.stream.Stream;
 
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.either;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasItem;
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.hasToString;
 import static org.hamcrest.Matchers.instanceOf;
@@ -53,7 +59,7 @@ public class DieWithDignityIT extends ESRestTestCase {
         final int pid = Integer.parseInt(pidFileLines.get(0));
         Files.delete(pidFile);
         IOException e = expectThrows(IOException.class,
             () -> client().performRequest(new Request("GET", "/_die_with_dignity")));
         Matcher<IOException> failureMatcher = instanceOf(ConnectionClosedException.class);
         if (Constants.WINDOWS) {
             /*
@@ -64,9 +70,9 @@ public class DieWithDignityIT extends ESRestTestCase {
              * https://issues.apache.org/jira/browse/HTTPASYNC-134
              *
              * So we catch it here and consider it "ok".
              */
             failureMatcher = either(failureMatcher)
                 .or(hasToString(containsString("An existing connection was forcibly closed by the remote host")));
         }
         assertThat(e, failureMatcher);
 
@@ -85,28 +91,62 @@ public class DieWithDignityIT extends ESRestTestCase {
             }
         });
 
-        // parse the logs and ensure that Elasticsearch died with the expected cause
-        final List<String> lines = Files.readAllLines(PathUtils.get(System.getProperty("log")));
-
-        final Iterator<String> it = lines.iterator();
-
-        boolean fatalError = false;
-        boolean fatalErrorInThreadExiting = false;
+        try {
+            // parse the logs and ensure that Elasticsearch died with the expected cause
+            Path path = PathUtils.get(System.getProperty("log"));
+            try (Stream<JsonLogLine> stream = JsonLogsStream.from(path)) {
+                final Iterator<JsonLogLine> it = stream.iterator();
 
-        while (it.hasNext() && (fatalError == false || fatalErrorInThreadExiting == false)) {
-            final String line = it.next();
-            if (line.matches(".*\\[ERROR\\]\\[o\\.e\\.ExceptionsHelper\\s*\\] \\[node-0\\] fatal error")) {
-                fatalError = true;
-            } else if (line.matches(".*\\[ERROR\\]\\[o\\.e\\.b\\.ElasticsearchUncaughtExceptionHandler\\] \\[node-0\\]"
-                + " fatal error in thread \\[Thread-\\d+\\], exiting$")) {
-                fatalErrorInThreadExiting = true;
-                assertTrue(it.hasNext());
-                assertThat(it.next(), equalTo("java.lang.OutOfMemoryError: die with dignity"));
-            }
+                boolean fatalError = false;
+                boolean fatalErrorInThreadExiting = false;
+
+                while (it.hasNext() && (fatalError == false || fatalErrorInThreadExiting == false)) {
+                    final JsonLogLine line = it.next();
+                    if (isFatalError(line)) {
+                        fatalError = true;
+                    } else if (isFatalErrorInThreadExiting(line) || isWarnExceptionReceived(line)) {
+                        fatalErrorInThreadExiting = true;
+                        assertThat(line.stacktrace(),
+                            hasItem(Matchers.containsString("java.lang.OutOfMemoryError: die with dignity")));
+                    }
+                }
+
+                assertTrue(fatalError);
+                assertTrue(fatalErrorInThreadExiting);
+            }
+        } catch (AssertionError ae) {
+            Path path = PathUtils.get(System.getProperty("log"));
+            debugLogs(path);
+            throw ae;
         }
+    }
 
-        assertTrue(fatalError);
-        assertTrue(fatalErrorInThreadExiting);
+    private boolean isWarnExceptionReceived(JsonLogLine line) {
+        return line.level().equals("WARN")
+            && line.component().equals("o.e.h.AbstractHttpServerTransport")
+            && line.nodeName().equals("node-0")
+            && line.message().contains("caught exception while handling client http traffic");
+    }
+
+    private void debugLogs(Path path) throws IOException {
+        try (BufferedReader reader = Files.newBufferedReader(path)) {
+            Terminal terminal = Terminal.DEFAULT;
+            reader.lines().forEach(line -> terminal.println(line));
+        }
+    }
+
+    private boolean isFatalErrorInThreadExiting(JsonLogLine line) {
+        return line.level().equals("ERROR")
+            && line.component().equals("o.e.b.ElasticsearchUncaughtExceptionHandler")
+            && line.nodeName().equals("node-0")
+            && line.message().matches("fatal error in thread \\[Thread-\\d+\\], exiting$");
+    }
+
+    private boolean isFatalError(JsonLogLine line) {
+        return line.level().equals("ERROR")
+            && line.component().equals("o.e.ExceptionsHelper")
+            && line.nodeName().equals("node-0")
+            && line.message().contains("fatal error");
     }
 
     @Override
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+apply plugin: 'elasticsearch.standalone-rest-test'
+apply plugin: 'elasticsearch.rest-test'
+apply plugin: 'elasticsearch.standalone-test'
+
+integTestCluster {
+    autoSetInitialMasterNodes = false
+    autoSetHostsProvider = false
+    /**
+     * Provide a custom log4j configuration where layout is an old style pattern and confirm that Elasticsearch
+     * can successfully startup.
+     */
+    extraConfigFile 'log4j2.properties', 'custom-log4j2.properties'
+}
+
+integTestRunner {
+    systemProperty 'tests.logfile',
+            "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.log"
+}
+
+unitTest {
+    systemProperty 'tests.security.manager', 'false'
+}
@@ -0,0 +1,31 @@
+
+status = error
+
+# log action execution errors for easier debugging
+logger.action.name = org.elasticsearch.action
+logger.action.level = debug
+
+appender.rolling.type = RollingFile
+appender.rolling.name = rolling
+appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log
+appender.rolling.layout.type = PatternLayout
+appender.rolling.layout.pattern =%notEmpty{%node_name} %notEmpty{%node_and_cluster_id} %notEmpty{${sys:es.logs.cluster_name}} %m%n
+
+appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz
+appender.rolling.policies.type = Policies
+appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.rolling.policies.time.interval = 1
+appender.rolling.policies.time.modulate = true
+appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.rolling.policies.size.size = 128MB
+appender.rolling.strategy.type = DefaultRolloverStrategy
+appender.rolling.strategy.fileIndex = nomax
+appender.rolling.strategy.action.type = Delete
+appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path}
+appender.rolling.strategy.action.condition.type = IfFileName
+appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
+appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
+appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB
+
+rootLogger.level = info
+rootLogger.appenderRef.rolling.ref = rolling
@@ -0,0 +1,232 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.logging;
+
+import org.apache.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.config.Configurator;
+import org.elasticsearch.cli.UserException;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ESTestCase;
+import org.hamcrest.FeatureMatcher;
+import org.hamcrest.Matcher;
+import org.hamcrest.Matchers;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+/**
+ * This test confirms JSON log structure is properly formatted and can be parsed.
+ * It has to be in a <code>org.elasticsearch.common.logging</code> package to use <code>PrefixLogger</code>
+ */
+public class JsonLoggerTests extends ESTestCase {
+
+    @BeforeClass
+    public static void initNodeName() {
+        LogConfigurator.setNodeName("sample-name");
+    }
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        LogConfigurator.registerErrorListener();
+        setupLogging("json_layout");
+    }
+
+    @Override
+    public void tearDown() throws Exception {
+        LoggerContext context = (LoggerContext) LogManager.getContext(false);
+        Configurator.shutdown(context);
+        super.tearDown();
+    }
+
+    @SuppressWarnings("unchecked")
+    public void testJsonLayout() throws IOException {
+        final Logger testLogger = LogManager.getLogger("test");
+
+        testLogger.error("This is an error message");
+        testLogger.warn("This is a warning message");
+        testLogger.info("This is an info message");
+        testLogger.debug("This is a debug message");
+        testLogger.trace("This is a trace message");
+        final Path path = clusterLogsPath();
+        try (Stream<JsonLogLine> stream = JsonLogsStream.from(path)) {
+            List<JsonLogLine> jsonLogs = collectLines(stream);
+
+            assertThat(jsonLogs, Matchers.contains(
+                logLine("file", Level.ERROR, "sample-name", "test", "This is an error message"),
+                logLine("file", Level.WARN, "sample-name", "test", "This is a warning message"),
+                logLine("file", Level.INFO, "sample-name", "test", "This is an info message"),
+                logLine("file", Level.DEBUG, "sample-name", "test", "This is a debug message"),
+                logLine("file", Level.TRACE, "sample-name", "test", "This is a trace message")
+            ));
+        }
+    }
+
+    @SuppressWarnings("unchecked")
+    public void testPrefixLoggerInJson() throws IOException {
+        Logger shardIdLogger = Loggers.getLogger("shardIdLogger", ShardId.fromString("[indexName][123]"));
+        shardIdLogger.info("This is an info message with a shardId");
+
+        Logger prefixLogger = new PrefixLogger(LogManager.getLogger("prefixLogger"), "PREFIX");
+        prefixLogger.info("This is an info message with a prefix");
+
+        final Path path = clusterLogsPath();
+        try (Stream<JsonLogLine> stream = JsonLogsStream.from(path)) {
+            List<JsonLogLine> jsonLogs = collectLines(stream);
+            assertThat(jsonLogs, Matchers.contains(
+                logLine("file", Level.INFO, "sample-name", "shardIdLogger", "[indexName][123] This is an info message with a shardId"),
+                logLine("file", Level.INFO, "sample-name", "prefixLogger", "PREFIX This is an info message with a prefix")
+            ));
+        }
+    }
+
+    public void testJsonInMessage() throws IOException {
+        final Logger testLogger = LogManager.getLogger("test");
+        String json = "{\n" +
+            "  \"terms\" : {\n" +
+            "    \"user\" : [\n" +
+            "      \"u1\",\n" +
+            "      \"u2\",\n" +
+            "      \"u3\"\n" +
+            "    ],\n" +
+            "    \"boost\" : 1.0\n" +
+            "  }\n" +
+            "}";
+
+        testLogger.info(json);
+
+        final Path path = clusterLogsPath();
+        try (Stream<JsonLogLine> stream = JsonLogsStream.from(path)) {
+            List<JsonLogLine> jsonLogs = collectLines(stream);
+            assertThat(jsonLogs, Matchers.contains(
+                logLine("file", Level.INFO, "sample-name", "test", json)
+            ));
+        }
+    }
+
+    public void testStacktrace() throws IOException {
+        final Logger testLogger = LogManager.getLogger("test");
+        testLogger.error("error message", new Exception("exception message", new RuntimeException("cause message")));
+
+        final Path path = clusterLogsPath();
+        try (Stream<JsonLogLine> stream = JsonLogsStream.from(path)) {
+            List<JsonLogLine> jsonLogs = collectLines(stream);
+            assertThat(jsonLogs, Matchers.contains(
+                Matchers.allOf(
+                    logLine("file", Level.ERROR, "sample-name", "test", "error message"),
+                    stacktraceWith("java.lang.Exception: exception message"),
+                    stacktraceWith("Caused by: java.lang.RuntimeException: cause message")
+                )
+            ));
+        }
+    }
+
+    public void testJsonInStacktraceMessageIsSplitted() throws IOException {
+        final Logger testLogger = LogManager.getLogger("test");
+
+        String json = "{\n" +
+            "  \"terms\" : {\n" +
+            "    \"user\" : [\n" +
+            "      \"u1\",\n" +
+            "      \"u2\",\n" +
+            "      \"u3\"\n" +
+            "    ],\n" +
+            "    \"boost\" : 1.0\n" +
+            "  }\n" +
+            "}";
+        testLogger.error("error message " + json, new Exception(json));
+
+        final Path path = clusterLogsPath();
+        try (Stream<JsonLogLine> stream = JsonLogsStream.from(path)) {
+            List<JsonLogLine> jsonLogs = collectLines(stream);
+
+            assertThat(jsonLogs, Matchers.contains(
+                Matchers.allOf(
+                    // the message field has a single line with the json escaped
+                    logLine("file", Level.ERROR, "sample-name", "test", "error message " + json),
+
+                    // the stacktrace field has each json line in a separate array element
+                    stacktraceWith(("java.lang.Exception: " + json).split("\n"))
+                )
+            ));
+        }
+    }
+
+    private List<JsonLogLine> collectLines(Stream<JsonLogLine> stream) {
+        return stream
+            .skip(1) // skip the first line from the super class
+            .collect(Collectors.toList());
+    }
+
+    private Path clusterLogsPath() {
+        return PathUtils.get(System.getProperty("es.logs.base_path"), System.getProperty("es.logs.cluster_name") + ".log");
+    }
+
+    private void setupLogging(final String config) throws IOException, UserException {
+        setupLogging(config, Settings.EMPTY);
+    }
+
+    private void setupLogging(final String config, final Settings settings) throws IOException, UserException {
+        assertFalse("Environment path.home variable should not be set", Environment.PATH_HOME_SETTING.exists(settings));
+        final Path configDir = getDataPath(config);
+        final Settings mergedSettings = Settings.builder()
+            .put(settings)
+            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
+            .build();
+        // need to use custom config path so we can use a custom log4j2.properties file for the test
+        final Environment environment = new Environment(mergedSettings, configDir);
+        LogConfigurator.configure(environment);
+    }
+
+    private Matcher<JsonLogLine> logLine(String type, Level level, String nodeName, String component, String message) {
+        return new FeatureMatcher<JsonLogLine, Boolean>(Matchers.is(true), "logLine", "logLine") {
+            @Override
+            protected Boolean featureValueOf(JsonLogLine actual) {
+                return actual.type().equals(type) &&
+                    actual.level().equals(level.toString()) &&
+                    actual.nodeName().equals(nodeName) &&
+                    actual.component().equals(component) &&
+                    actual.message().equals(message);
+            }
+        };
+    }
+
+    private Matcher<JsonLogLine> stacktraceWith(String... lines) {
+        return new FeatureMatcher<JsonLogLine, List<String>>(Matchers.hasItems(lines),
+            "stacktrace", "stacktrace") {
+            @Override
+            protected List<String> featureValueOf(JsonLogLine actual) {
+                return actual.stacktrace();
+            }
+        };
+    }
+}
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.qa.custom_logging;
+
+import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.test.hamcrest.RegexMatcher;
+import org.elasticsearch.test.rest.ESRestTestCase;
+import org.hamcrest.Matchers;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+import java.util.List;
+
+/**
+ * This test verifies that Elasticsearch can startup successfully with a custom logging config using variables introduced in
+ * <code>ESJsonLayout</code>
+ * The intention is to confirm that users can still run their Elasticsearch instances with previous configurations.
+ */
+public class CustomLoggingConfigIT extends ESRestTestCase {
+    private static final String NODE_STARTED = ".*node-0.*cluster.uuid.*node.id.*started.*";
+
+    public void testSuccessfulStartupWithCustomConfig() throws Exception {
+        assertBusy(() -> {
+            List<String> lines = readAllLines(getLogFile());
+            assertThat(lines, Matchers.hasItem(RegexMatcher.matches(NODE_STARTED)));
+        });
+    }
+
+    private List<String> readAllLines(Path logFile) {
+        return AccessController.doPrivileged((PrivilegedAction<List<String>>) () -> {
+            try {
+                return Files.readAllLines(logFile, StandardCharsets.UTF_8);
+            } catch (IOException e) {
+                throw new UncheckedIOException(e);
+            }
+        });
+    }
+
+    @SuppressForbidden(reason = "PathUtils doesn't have permission to read this file")
+    private Path getLogFile() {
+        String logFileString = System.getProperty("tests.logfile");
+        if (logFileString == null) {
+            fail("tests.logfile must be set to run this test. It is automatically "
+                + "set by gradle. If you must set it yourself then it should be the absolute path to the "
+                + "log file.");
+        }
+        return Paths.get(logFileString);
+    }
+}
@@ -0,0 +1,21 @@
appender.console.type = Console
appender.console.name = console
appender.console.layout.type = ESJsonLayout
appender.console.layout.type_name = console

appender.file.type = File
appender.file.name = file
appender.file.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
appender.file.layout.type = ESJsonLayout
appender.file.layout.type_name = file

rootLogger.level = info
rootLogger.appenderRef.console.ref = console
rootLogger.appenderRef.file.ref = file

logger.test.name = test
logger.test.level = trace
logger.test.appenderRef.console.ref = console
logger.test.appenderRef.file.ref = file
logger.test.additivity = false

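With this configuration every log event becomes a single-line JSON document, written both to the console and to the ${sys:es.logs.cluster_name}.log file. For illustration only, a line looks roughly like the following; the values are made up (borrowing the shape of the fixture in the tests below), and node.id/cluster.uuid would only appear once the first cluster state has arrived:

{"type": "console", "timestamp": "2019-01-03T16:30:53,058+0100", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "clustername", "node.name": "node-0", "message": "started" }
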
@@ -0,0 +1,4 @@
grant {
    // Needed to read the log file
    permission java.io.FilePermission "${tests.logfile}", "read";
};

@@ -30,5 +30,5 @@ integTestCluster {

 integTestRunner {
     systemProperty 'tests.logfile',
-        "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }.log"
+        "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.json"
 }

@@ -19,11 +19,11 @@

 package org.elasticsearch.unconfigured_node_name;

-import org.elasticsearch.common.logging.NodeNameInLogsIntegTestCase;
+import org.elasticsearch.common.logging.JsonLogsIntegTestCase;
 import org.hamcrest.Matcher;

-import java.io.IOException;
 import java.io.BufferedReader;
+import java.io.IOException;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;

@@ -32,7 +32,7 @@ import java.security.PrivilegedAction;

 import static org.hamcrest.Matchers.not;

-public class NodeNameInLogsIT extends NodeNameInLogsIntegTestCase {
+public class JsonLogsFormatAndParseIT extends JsonLogsIntegTestCase {
     @Override
     protected Matcher<String> nodeNameMatcher() {
         return not("");

@@ -98,7 +98,7 @@ setup() {
     systemctl start elasticsearch.service
     wait_for_elasticsearch_status
     assert_file_exist "/var/run/elasticsearch/elasticsearch.pid"
-    assert_file_exist "/var/log/elasticsearch/elasticsearch.log"
+    assert_file_exist "/var/log/elasticsearch/elasticsearch_server.json"

 # Converts the epoch back in a human readable format
 run date --date=@$epoch "+%Y-%m-%d %H:%M:%S"

@@ -428,7 +428,7 @@ describe_port() {
 }

 debug_collect_logs() {
-    local es_logfile="$ESLOG/elasticsearch.log"
+    local es_logfile="$ESLOG/elasticsearch_server.json"
     local system_logfile='/var/log/messages'

     if [ -e "$es_logfile" ]; then

@@ -0,0 +1,118 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.logging;

import org.apache.logging.log4j.core.Layout;
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.config.Node;
import org.apache.logging.log4j.core.config.plugins.Plugin;
import org.apache.logging.log4j.core.config.plugins.PluginAttribute;
import org.apache.logging.log4j.core.config.plugins.PluginFactory;
import org.apache.logging.log4j.core.layout.AbstractStringLayout;
import org.apache.logging.log4j.core.layout.ByteBufferDestination;
import org.apache.logging.log4j.core.layout.PatternLayout;
import org.elasticsearch.common.Strings;

import java.nio.charset.Charset;
import java.util.Map;

/**
 * Formats log events as strings in a json format.
 * <p>
 * The class wraps a {@link PatternLayout} with a pattern that formats into json. This gives more flexibility and control over how the
 * log messages are formatted than {@link org.apache.logging.log4j.core.layout.JsonLayout} does.
 */
@Plugin(name = "ESJsonLayout", category = Node.CATEGORY, elementType = Layout.ELEMENT_TYPE, printObject = true)
public class ESJsonLayout extends AbstractStringLayout {
    /**
     * Fields used in the pattern to format a json log line:
     * <ul>
     *     <li>type - the type of logs. These represent appenders and help docker distinguish log streams.</li>
     *     <li>timestamp - ISO8601 with an additional timezone ID</li>
     *     <li>level - INFO, WARN etc.</li>
     *     <li>component - logger name, most of the time a class name</li>
     *     <li>cluster.name - taken from the sys:es.logs.cluster_name system property because it is always set</li>
     *     <li>node.name - taken from NodeNamePatternConverter, as it can be set at runtime to the hostname when not set in
     *     elasticsearch.yml</li>
     *     <li>node_and_cluster_id - rendered in json as node.id and cluster.uuid - taken from NodeAndClusterIdConverter and present
     *     once the first clusterStateUpdate has been received</li>
     *     <li>message - a json escaped message. Multiline messages are converted to a single line with newlines explicitly
     *     replaced by \n</li>
     *     <li>exceptionAsJson - rendered in json as a stacktrace field. Only present when a throwable is passed as a parameter to the
     *     logger. Taken from JsonThrowablePatternConverter</li>
     * </ul>
     */
    private static final String PATTERN = "{" +
        "\"type\": \"${TYPE}\", " +
        "\"timestamp\": \"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}\", " +
        "\"level\": \"%p\", " +
        "\"component\": \"%c{1.}\", " +
        "\"cluster.name\": \"${sys:es.logs.cluster_name}\", " +
        "\"node.name\": \"%node_name\", " +
        "%notEmpty{%node_and_cluster_id, } " +
        "\"message\": \"%notEmpty{%enc{%marker}{JSON} }%enc{%.-10000m}{JSON}\" " +
        "%exceptionAsJson " +
        "}%n";

    private final PatternLayout patternLayout;

    protected ESJsonLayout(String typeName, Charset charset) {
        super(charset);
        this.patternLayout = PatternLayout.newBuilder()
            .withPattern(pattern(typeName))
            .withAlwaysWriteExceptions(false)
            .build();
    }

    private String pattern(String type) {
        if (Strings.isEmpty(type)) {
            throw new IllegalArgumentException("layout parameter 'type_name' cannot be empty");
        }
        return PATTERN.replace("${TYPE}", type);
    }

    @PluginFactory
    public static ESJsonLayout createLayout(@PluginAttribute("type_name") String type,
                                            @PluginAttribute(value = "charset", defaultString = "UTF-8") Charset charset) {
        return new ESJsonLayout(type, charset);
    }

    @Override
    public String toSerializable(final LogEvent event) {
        return patternLayout.toSerializable(event);
    }

    @Override
    public Map<String, String> getContentFormat() {
        return patternLayout.getContentFormat();
    }

    @Override
    public void encode(final LogEvent event, final ByteBufferDestination destination) {
        patternLayout.encode(event, destination);
    }

    @Override
    public String toString() {
        final StringBuilder sb = new StringBuilder("ESJsonLayout{");
        sb.append("patternLayout=").append(patternLayout);
        sb.append('}');
        return sb.toString();
    }
}

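For illustration, a minimal sketch of driving the layout by hand; log4j2 normally instantiates it through the @PluginFactory when it reads "appender.*.layout.type = ESJsonLayout". The sketch assumes the Elasticsearch converter plugins (%node_name and friends) are visible to log4j2's plugin registry, and the class name ESJsonLayoutSketch is made up:

import java.nio.charset.StandardCharsets;

import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.impl.Log4jLogEvent;
import org.apache.logging.log4j.message.SimpleMessage;
import org.elasticsearch.common.logging.ESJsonLayout;

public class ESJsonLayoutSketch {
    public static void main(String[] args) {
        // "console" plays the role of the type_name attribute from log4j2.properties.
        ESJsonLayout layout = ESJsonLayout.createLayout("console", StandardCharsets.UTF_8);

        LogEvent event = Log4jLogEvent.newBuilder()
            .setMessage(new SimpleMessage("hello json logs"))
            .build();

        // Prints one newline-terminated JSON document; cluster.name resolves from the
        // es.logs.cluster_name system property, which the startup scripts normally set.
        System.out.print(layout.toSerializable(event));
    }
}
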
@@ -0,0 +1,105 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache license, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the license for the specific language governing permissions and
 * limitations under the license.
 */
package org.elasticsearch.common.logging;

import com.fasterxml.jackson.core.io.JsonStringEncoder;
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.config.Configuration;
import org.apache.logging.log4j.core.config.plugins.Plugin;
import org.apache.logging.log4j.core.pattern.ConverterKeys;
import org.apache.logging.log4j.core.pattern.ExtendedThrowablePatternConverter;
import org.apache.logging.log4j.core.pattern.PatternConverter;
import org.apache.logging.log4j.core.pattern.ThrowablePatternConverter;
import org.apache.logging.log4j.util.Strings;

import java.nio.charset.Charset;
import java.util.StringJoiner;

/**
 * Outputs the Throwable portion of the LoggingEvent as a json formatted field with an array
 * "stacktrace": [ "stacktrace", "lines", "as", "array", "elements" ]
 *
 * Reuses {@link org.apache.logging.log4j.core.pattern.ExtendedThrowablePatternConverter}, which already converts a Throwable from a
 * LoggingEvent into a multiline string.
 */
@Plugin(name = "JsonThrowablePatternConverter", category = PatternConverter.CATEGORY)
@ConverterKeys({"exceptionAsJson"})
public final class JsonThrowablePatternConverter extends ThrowablePatternConverter {
    private final ExtendedThrowablePatternConverter throwablePatternConverter;

    /**
     * Private as it is only expected to be used by the log4j2 newInstance method
     */
    private JsonThrowablePatternConverter(final Configuration config, final String[] options) {
        super("JsonThrowablePatternConverter", "throwable", options, config);
        this.throwablePatternConverter = ExtendedThrowablePatternConverter.newInstance(config, options);
    }

    /**
     * Gets an instance of the class.
     *
     * @param config  The current Configuration.
     * @param options pattern options, may be null. If the first element is "short",
     *                only the first line of the throwable will be formatted.
     * @return instance of class.
     */
    public static JsonThrowablePatternConverter newInstance(final Configuration config, final String[] options) {
        return new JsonThrowablePatternConverter(config, options);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void format(final LogEvent event, final StringBuilder toAppendTo) {
        String consoleStacktrace = formatStacktrace(event);
        if (Strings.isNotEmpty(consoleStacktrace)) {
            String jsonStacktrace = formatJson(consoleStacktrace);

            toAppendTo.append(", ");
            toAppendTo.append(jsonStacktrace);
        }
    }

    private String formatStacktrace(LogEvent event) {
        StringBuilder stringBuilder = new StringBuilder();
        throwablePatternConverter.format(event, stringBuilder);
        return stringBuilder.toString();
    }

    private String formatJson(String consoleStacktrace) {
        String lineSeparator = options.getSeparator() + "\t|" + options.getSeparator();
        String[] split = consoleStacktrace.split(lineSeparator);

        StringJoiner stringJoiner = new StringJoiner(",\n", "\n\"stacktrace\": [", "]");
        for (String line : split) {
            stringJoiner.add(wrapAsJson(line));
        }
        return stringJoiner.toString();
    }

    private String wrapAsJson(String line) {
        byte[] bytes = JsonStringEncoder.getInstance().quoteAsUTF8(line);
        return "\"" + new String(bytes, Charset.defaultCharset()) + "\"";
    }

    @Override
    public boolean handlesThrowable() {
        return true;
    }
}

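As a condensed sketch of the conversion (the unit test added later in this change exercises the same path), with a made-up class name:

import org.apache.logging.log4j.core.impl.Log4jLogEvent;
import org.elasticsearch.common.logging.JsonThrowablePatternConverter;

public class JsonThrowableSketch {
    public static void main(String[] args) {
        // null config/options mirror how the unit test builds the converter.
        JsonThrowablePatternConverter converter = JsonThrowablePatternConverter.newInstance(null, null);

        StringBuilder sb = new StringBuilder();
        converter.format(Log4jLogEvent.newBuilder().setThrown(new Exception("boom")).build(), sb);

        // sb now starts with `, "stacktrace": [` followed by one json-escaped
        // array element per stacktrace line.
        System.out.println(sb);
    }
}
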
@@ -0,0 +1,78 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.logging;

import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.config.plugins.Plugin;
import org.apache.logging.log4j.core.pattern.ConverterKeys;
import org.apache.logging.log4j.core.pattern.LogEventPatternConverter;
import org.apache.logging.log4j.core.pattern.PatternConverter;
import org.apache.lucene.util.SetOnce;

import java.util.Locale;

/**
 * Pattern converter to format the node_and_cluster_id variable into the json fields <code>node.id</code> and <code>cluster.uuid</code>.
 * Keeping those two fields together ensures that they are set atomically and become visible in logs at the same time.
 */
@Plugin(category = PatternConverter.CATEGORY, name = "NodeAndClusterIdConverter")
@ConverterKeys({"node_and_cluster_id"})
public final class NodeAndClusterIdConverter extends LogEventPatternConverter {
    private static final SetOnce<String> nodeAndClusterId = new SetOnce<>();

    /**
     * Called by log4j2 to initialize this converter.
     */
    public static NodeAndClusterIdConverter newInstance(@SuppressWarnings("unused") final String[] options) {
        return new NodeAndClusterIdConverter();
    }

    public NodeAndClusterIdConverter() {
        super("NodeAndClusterId", "node_and_cluster_id");
    }

    /**
     * Updates the nodeId and clusterId exactly once.
     * Subsequent calls will throw {@link org.apache.lucene.util.SetOnce.AlreadySetException}.
     *
     * @param nodeId      a nodeId received from a cluster state update
     * @param clusterUUID a clusterUUID received from a cluster state update
     */
    public static void setNodeIdAndClusterId(String nodeId, String clusterUUID) {
        nodeAndClusterId.set(formatIds(clusterUUID, nodeId));
    }

    /**
     * Formats the node.id and cluster.uuid into json fields.
     *
     * @param event a log event; it is ignored here as the converter only uses the previously set nodeId and clusterId for formatting
     */
    @Override
    public void format(LogEvent event, StringBuilder toAppendTo) {
        if (nodeAndClusterId.get() != null) {
            toAppendTo.append(nodeAndClusterId.get());
        }
        // nodeId/clusterUuid not received yet, not appending anything
    }

    private static String formatIds(String clusterUUID, String nodeId) {
        return String.format(Locale.ROOT, "\"cluster.uuid\": \"%s\", \"node.id\": \"%s\"", clusterUUID, nodeId);
    }
}

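A small sketch of what the converter renders once the ids have been set; the id values are borrowed from the test fixtures in this change and the class name is made up. The converter ignores its event argument, so passing null is safe here:

import org.elasticsearch.common.logging.NodeAndClusterIdConverter;

public class NodeAndClusterIdSketch {
    public static void main(String[] args) {
        // Normally called exactly once, from the first cluster state update.
        NodeAndClusterIdConverter.setNodeIdAndClusterId("VTShUqmcQG6SzeKY5nn7qA", "OG5MkvOrR9azuClJhWvy6Q");

        StringBuilder sb = new StringBuilder();
        new NodeAndClusterIdConverter().format(null, sb);

        // Prints: "cluster.uuid": "OG5MkvOrR9azuClJhWvy6Q", "node.id": "VTShUqmcQG6SzeKY5nn7qA"
        System.out.println(sb);
    }
}
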
@@ -0,0 +1,77 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.logging;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateObserver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ThreadContext;

/**
 * The {@link NodeAndClusterIdStateListener} listens to cluster state changes and, ONLY on the first update it receives,
 * sets the clusterUUID and nodeID in the log4j pattern converter {@link NodeAndClusterIdConverter}.
 * Once that first update has been received, the listener is automatically de-registered from subsequent updates.
 */
public class NodeAndClusterIdStateListener implements ClusterStateObserver.Listener {
    private static final Logger logger = LogManager.getLogger(NodeAndClusterIdStateListener.class);

    private NodeAndClusterIdStateListener() {}

    /**
     * Subscribes to the first cluster state update in which the nodeId and clusterId are present
     * and sets these values in {@link NodeAndClusterIdConverter}.
     */
    public static void getAndSetNodeIdAndClusterId(ClusterService clusterService, ThreadContext threadContext) {
        ClusterState clusterState = clusterService.state();
        ClusterStateObserver observer = new ClusterStateObserver(clusterState, clusterService, null, logger, threadContext);

        observer.waitForNextChange(new NodeAndClusterIdStateListener(), NodeAndClusterIdStateListener::isNodeAndClusterIdPresent);
    }

    private static boolean isNodeAndClusterIdPresent(ClusterState clusterState) {
        return getNodeId(clusterState) != null && getClusterUUID(clusterState) != null;
    }

    private static String getClusterUUID(ClusterState state) {
        return state.getMetaData().clusterUUID();
    }

    private static String getNodeId(ClusterState state) {
        return state.getNodes().getLocalNodeId();
    }

    @Override
    public void onNewClusterState(ClusterState state) {
        String nodeId = getNodeId(state);
        String clusterUUID = getClusterUUID(state);

        logger.debug("Received cluster state update. Setting nodeId=[{}] and clusterUuid=[{}]", nodeId, clusterUUID);
        NodeAndClusterIdConverter.setNodeIdAndClusterId(nodeId, clusterUUID);
    }

    @Override
    public void onClusterServiceClose() {}

    @Override
    public void onTimeout(TimeValue timeout) {}
}

@@ -67,6 +67,7 @@ import org.elasticsearch.common.inject.ModulesBuilder;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.common.logging.DeprecationLogger;
+import org.elasticsearch.common.logging.NodeAndClusterIdStateListener;
 import org.elasticsearch.common.network.NetworkAddress;
 import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.network.NetworkService;

@@ -683,10 +684,15 @@ public class Node implements Closeable {
         transportService.acceptIncomingRequests();
         discovery.startInitialJoin();
         final TimeValue initialStateTimeout = DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings);
+        NodeAndClusterIdStateListener.getAndSetNodeIdAndClusterId(clusterService,
+            injector.getInstance(ThreadPool.class).getThreadContext());
+
         if (initialStateTimeout.millis() > 0) {
             final ThreadPool thread = injector.getInstance(ThreadPool.class);
             ClusterState clusterState = clusterService.state();
-            ClusterStateObserver observer = new ClusterStateObserver(clusterState, clusterService, null, logger, thread.getThreadContext());
+            ClusterStateObserver observer =
+                new ClusterStateObserver(clusterState, clusterService, null, logger, thread.getThreadContext());

             if (clusterState.nodes().getMasterNodeId() == null) {
                 logger.debug("waiting to join the cluster. timeout [{}]", initialStateTimeout);
                 final CountDownLatch latch = new CountDownLatch(1);

@@ -0,0 +1,93 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.logging;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.impl.Log4jLogEvent;
import org.apache.logging.log4j.message.SimpleMessage;
import org.elasticsearch.test.ESTestCase;
import org.hamcrest.Matchers;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.StringReader;

import static org.hamcrest.Matchers.equalTo;

public class JsonThrowablePatternConverterTests extends ESTestCase {
    JsonThrowablePatternConverter converter = JsonThrowablePatternConverter.newInstance(null, null);

    public void testNoStacktrace() throws IOException {
        LogEvent event = Log4jLogEvent.newBuilder()
            .build();
        String result = format(event);

        JsonLogLine jsonLogLine = JsonLogsStream.from(new BufferedReader(new StringReader(result)))
            .findFirst()
            .orElseThrow(() -> new AssertionError("no logs parsed"));

        assertThat(jsonLogLine.stacktrace(), Matchers.nullValue());
    }

    public void testStacktraceWithJson() throws IOException {
        LogManager.getLogger().info("asdf");

        String json = "{\n" +
            "  \"terms\" : {\n" +
            "    \"user\" : [\n" +
            "      \"u1\",\n" +
            "      \"u2\",\n" +
            "      \"u3\"\n" +
            "    ],\n" +
            "    \"boost\" : 1.0\n" +
            "  }\n" +
            "}";
        Exception thrown = new Exception(json);
        LogEvent event = Log4jLogEvent.newBuilder()
            .setMessage(new SimpleMessage("message"))
            .setThrown(thrown)
            .build();

        String result = format(event);

        // confirms the exception is correctly parsed

        JsonLogLine jsonLogLine = JsonLogsStream.from(new BufferedReader(new StringReader(result)))
            .findFirst()
            .orElseThrow(() -> new AssertionError("no logs parsed"));

        int jsonLength = json.split("\n").length;
        int stacktraceLength = thrown.getStackTrace().length;
        assertThat("stacktrace should be formatted in multiple lines",
            jsonLogLine.stacktrace().size(), equalTo(jsonLength + stacktraceLength));
    }

    private String format(LogEvent event) {
        StringBuilder builder = new StringBuilder();
        converter.format(event, builder);
        String jsonStacktraceElement = builder.toString();

        return "{\"type\": \"console\", \"timestamp\": \"2019-01-03T16:30:53,058+0100\", \"level\": \"DEBUG\", " +
            "\"component\": \"o.e.a.s.TransportSearchAction\", \"cluster.name\": \"clustername\", \"node.name\": \"node-0\", " +
            "\"cluster.uuid\": \"OG5MkvOrR9azuClJhWvy6Q\", \"node.id\": \"VTShUqmcQG6SzeKY5nn7qA\", \"message\": \"msg msg\" " +
            jsonStacktraceElement + "}";
    }
}

@@ -0,0 +1,158 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.logging;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ObjectParser;

import java.util.List;

/**
 * Represents a single log line in a json format.
 * Parsing log lines with this class confirms the json format of logs.
 */
public class JsonLogLine {
    public static final ObjectParser<JsonLogLine, Void> PARSER = createParser(false);

    private String type;
    private String timestamp;
    private String level;
    private String component;
    private String clusterName;
    private String nodeName;
    private String clusterUuid;
    private String nodeId;
    private String message;
    private List<String> stacktrace;

    @Override
    public String toString() {
        final StringBuilder sb = new StringBuilder("JsonLogLine{");
        sb.append("type='").append(type).append('\'');
        sb.append(", timestamp='").append(timestamp).append('\'');
        sb.append(", level='").append(level).append('\'');
        sb.append(", component='").append(component).append('\'');
        sb.append(", clusterName='").append(clusterName).append('\'');
        sb.append(", nodeName='").append(nodeName).append('\'');
        sb.append(", clusterUuid='").append(clusterUuid).append('\'');
        sb.append(", nodeId='").append(nodeId).append('\'');
        sb.append(", message='").append(message).append('\'');
        sb.append(", stacktrace=").append(stacktrace);
        sb.append('}');
        return sb.toString();
    }

    public String type() {
        return type;
    }

    public String timestamp() {
        return timestamp;
    }

    public String level() {
        return level;
    }

    public String component() {
        return component;
    }

    public String clusterName() {
        return clusterName;
    }

    public String nodeName() {
        return nodeName;
    }

    public String clusterUuid() {
        return clusterUuid;
    }

    public String nodeId() {
        return nodeId;
    }

    public String message() {
        return message;
    }

    public List<String> stacktrace() {
        return stacktrace;
    }

    public void setType(String type) {
        this.type = type;
    }

    public void setTimestamp(String timestamp) {
        this.timestamp = timestamp;
    }

    public void setLevel(String level) {
        this.level = level;
    }

    public void setComponent(String component) {
        this.component = component;
    }

    public void setClusterName(String clusterName) {
        this.clusterName = clusterName;
    }

    public void setNodeName(String nodeName) {
        this.nodeName = nodeName;
    }

    public void setClusterUuid(String clusterUuid) {
        this.clusterUuid = clusterUuid;
    }

    public void setNodeId(String nodeId) {
        this.nodeId = nodeId;
    }

    public void setMessage(String message) {
        this.message = message;
    }

    public void setStacktrace(List<String> stacktrace) {
        this.stacktrace = stacktrace;
    }

    private static ObjectParser<JsonLogLine, Void> createParser(boolean ignoreUnknownFields) {
        ObjectParser<JsonLogLine, Void> parser = new ObjectParser<>("search_template", ignoreUnknownFields, JsonLogLine::new);
        parser.declareString(JsonLogLine::setType, new ParseField("type"));
        parser.declareString(JsonLogLine::setTimestamp, new ParseField("timestamp"));
        parser.declareString(JsonLogLine::setLevel, new ParseField("level"));
        parser.declareString(JsonLogLine::setComponent, new ParseField("component"));
        parser.declareString(JsonLogLine::setClusterName, new ParseField("cluster.name"));
        parser.declareString(JsonLogLine::setNodeName, new ParseField("node.name"));
        parser.declareString(JsonLogLine::setClusterUuid, new ParseField("cluster.uuid"));
        parser.declareString(JsonLogLine::setNodeId, new ParseField("node.id"));
        parser.declareString(JsonLogLine::setMessage, new ParseField("message"));
        parser.declareStringArray(JsonLogLine::setStacktrace, new ParseField("stacktrace"));

        return parser;
    }
}

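A sketch of parsing a single line with the declared parser; the input line is hypothetical but shaped like ESJsonLayout's output, and the class name is made up:

import org.elasticsearch.common.logging.JsonLogLine;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;

public class JsonLogLineSketch {
    public static void main(String[] args) throws Exception {
        String line = "{\"type\": \"console\", \"timestamp\": \"2019-01-03T16:30:53,058+0100\", " +
            "\"level\": \"INFO\", \"component\": \"o.e.n.Node\", \"cluster.name\": \"clustername\", " +
            "\"node.name\": \"node-0\", \"message\": \"started\"}";

        try (XContentParser parser = JsonXContent.jsonXContent.createParser(
                NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, line)) {
            JsonLogLine logLine = JsonLogLine.PARSER.apply(parser, null);
            System.out.println(logLine.level() + " " + logLine.message()); // INFO started
        }
    }
}
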
@@ -0,0 +1,129 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.logging;

import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.test.rest.ESRestTestCase;

import java.io.BufferedReader;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Iterator;
import java.util.stream.Stream;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.isEmptyOrNullString;
import static org.hamcrest.Matchers.not;

/**
 * Tests that extend this class verify that all json layout fields appear in the first few log lines after startup.
 * Fields available upon process startup: <code>type</code>, <code>timestamp</code>, <code>level</code>, <code>component</code>,
 * <code>message</code>, <code>node.name</code>, <code>cluster.name</code>;
 * <code>node.id</code> and <code>cluster.uuid</code> only become available once the first clusterState has been received.
 *
 * <code>node.name</code>, <code>cluster.name</code>, <code>node.id</code> and <code>cluster.uuid</code>
 * should not change across log lines.
 *
 * Note that this won't pass for nodes in clusters that don't have the node name defined in elasticsearch.yml <strong>and</strong> start
 * with DEBUG or TRACE level logging. Those nodes log a few lines before the node.name is set by <code>LogConfigurator.setNodeName</code>.
 */
public abstract class JsonLogsIntegTestCase extends ESRestTestCase {
    /**
     * Number of lines in the log file to check for the <code>node.name</code>, <code>node.id</code> or <code>cluster.uuid</code>. We
     * don't just check the entire log file because it could be quite long.
     */
    private static final int LINES_TO_CHECK = 10;

    /**
     * The node name to expect in the log file.
     */
    protected abstract org.hamcrest.Matcher<String> nodeNameMatcher();

    /**
     * Open the log file. This is delegated to subclasses because the test
     * framework doesn't have permission to read from the log file but
     * subclasses can grant themselves that permission.
     */
    protected abstract BufferedReader openReader(Path logFile);

    public void testElementsPresentOnAllLinesOfLog() throws IOException {
        JsonLogLine firstLine = findFirstLine();
        assertNotNull(firstLine);

        try (Stream<JsonLogLine> stream = JsonLogsStream.from(openReader(getLogFile()))) {
            stream.limit(LINES_TO_CHECK)
                .forEach(jsonLogLine -> {
                    assertThat(jsonLogLine.type(), not(isEmptyOrNullString()));
                    assertThat(jsonLogLine.timestamp(), not(isEmptyOrNullString()));
                    assertThat(jsonLogLine.level(), not(isEmptyOrNullString()));
                    assertThat(jsonLogLine.component(), not(isEmptyOrNullString()));
                    assertThat(jsonLogLine.message(), not(isEmptyOrNullString()));

                    // all lines should have the same nodeName and clusterName
                    assertThat(jsonLogLine.nodeName(), nodeNameMatcher());
                    assertThat(jsonLogLine.clusterName(), equalTo(firstLine.clusterName()));
                });
        }
    }

    private JsonLogLine findFirstLine() throws IOException {
        try (Stream<JsonLogLine> stream = JsonLogsStream.from(openReader(getLogFile()))) {
            return stream.findFirst()
                .orElseThrow(() -> new AssertionError("no logs at all?!"));
        }
    }

    public void testNodeIdAndClusterIdConsistentOnceAvailable() throws IOException {
        try (Stream<JsonLogLine> stream = JsonLogsStream.from(openReader(getLogFile()))) {
            Iterator<JsonLogLine> iterator = stream.iterator();

            JsonLogLine firstLine = null;
            while (iterator.hasNext()) {
                JsonLogLine jsonLogLine = iterator.next();
                if (jsonLogLine.nodeId() != null) {
                    firstLine = jsonLogLine;
                    break; // stop at the first line on which the ids are present
                }
            }
            assertNotNull(firstLine);

            // once the nodeId and clusterId are received, they should be the same on the remaining lines
            int i = 0;
            while (iterator.hasNext() && i++ < LINES_TO_CHECK) {
                JsonLogLine jsonLogLine = iterator.next();
                assertThat(jsonLogLine.nodeId(), equalTo(firstLine.nodeId()));
                assertThat(jsonLogLine.clusterUuid(), equalTo(firstLine.clusterUuid()));
            }
        }
    }

    @SuppressForbidden(reason = "PathUtils doesn't have permission to read this file")
    private Path getLogFile() {
        String logFileString = System.getProperty("tests.logfile");
        if (logFileString == null) {
            fail("tests.logfile must be set to run this test. It is automatically "
                + "set by gradle. If you must set it yourself then it should be the absolute path to the "
                + "log file.");
        }
        return Paths.get(logFileString);
    }
}

@@ -0,0 +1,97 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.logging;

import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Iterator;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

/**
 * Returns a stream of json log lines.
 * This is intended to be used for easy and readable assertions in logger tests.
 */
public class JsonLogsStream {
    private final XContentParser parser;
    private final BufferedReader reader;

    private JsonLogsStream(BufferedReader reader) throws IOException {
        this.reader = reader;
        this.parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
            reader);
    }

    public static Stream<JsonLogLine> from(BufferedReader reader) throws IOException {
        return new JsonLogsStream(reader).stream();
    }

    public static Stream<JsonLogLine> from(Path path) throws IOException {
        return from(Files.newBufferedReader(path));
    }

    private Stream<JsonLogLine> stream() {
        Spliterator<JsonLogLine> spliterator = Spliterators.spliteratorUnknownSize(new JsonIterator(), Spliterator.ORDERED);
        return StreamSupport.stream(spliterator, false)
            .onClose(this::close);
    }

    private void close() {
        try {
            parser.close();
            reader.close();
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    private class JsonIterator implements Iterator<JsonLogLine> {

        @Override
        public boolean hasNext() {
            return parser.isClosed() == false;
        }

        @Override
        public JsonLogLine next() {
            JsonLogLine apply = JsonLogLine.PARSER.apply(parser, null);
            nextToken();
            return apply;
        }

        private void nextToken() {
            try {
                parser.nextToken();
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }
        }
    }
}

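A sketch of typical use, with a hypothetical log path and a made-up class name; the stream's onClose hook releases the parser and reader, so try-with-resources matters here:

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.stream.Stream;

import org.elasticsearch.common.logging.JsonLogLine;
import org.elasticsearch.common.logging.JsonLogsStream;

public class JsonLogsStreamSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical path to a file written by an ESJsonLayout file appender.
        Path log = Paths.get("/var/log/elasticsearch/elasticsearch_server.json");

        try (Stream<JsonLogLine> stream = JsonLogsStream.from(log)) {
            stream.limit(10)
                .forEach(line -> System.out.println(line.level() + " " + line.message()));
        }
    }
}
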
@@ -1,101 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.logging;

import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.test.rest.ESRestTestCase;

import java.io.BufferedReader;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.regex.Pattern;
import java.util.regex.Matcher;

import static org.hamcrest.Matchers.containsString;

/**
 * Tests that extend this class verify that the node name appears in the first
 * few log lines on startup. Note that this won't pass for clusters that don't
 * have the node name defined in elasticsearch.yml <strong>and</strong> start with
 * DEBUG or TRACE level logging. Those nodes log a few lines before they
 * resolve the node name.
 */
public abstract class NodeNameInLogsIntegTestCase extends ESRestTestCase {
    /**
     * Number of lines in the log file to check for the node name. We don't
     * just check the entire log file because it could be quite long and
     * exceptions don't include the node name.
     */
    private static final int LINES_TO_CHECK = 10;

    /**
     * The node name to expect in the logs file.
     */
    protected abstract org.hamcrest.Matcher<String> nodeNameMatcher();

    /**
     * Open the log file. This is delegated to subclasses because the test
     * framework doesn't have permission to read from the log file but
     * subclasses can grant themselves that permission.
     */
    protected abstract BufferedReader openReader(Path logFile);

    public void testNodeNameIsOnAllLinesOfLog() throws IOException {
        BufferedReader logReader = openReader(getLogFile());
        try {
            String line = logReader.readLine();
            assertNotNull("no logs at all?!", line);
            Matcher m = Pattern.compile("\\] \\[([^\\]]+)\\] ").matcher(line);
            if (false == m.find()) {
                fail("Didn't see the node name in [" + line + "]");
            }
            String nodeName = m.group(1);

            assertThat(nodeName, nodeNameMatcher());

            int lineNumber = 1;
            while (true) {
                if (lineNumber < LINES_TO_CHECK) {
                    break;
                }
                line = logReader.readLine();
                if (line == null) {
                    break; // eof
                }
                lineNumber++;
                assertThat(line, containsString("] [" + nodeName + "] "));
            }
        } finally {
            logReader.close();
        }
    }

    @SuppressForbidden(reason = "PathUtils doesn't have permission to read this file")
    private Path getLogFile() {
        String logFileString = System.getProperty("tests.logfile");
        if (null == logFileString) {
            fail("tests.logfile must be set to run this test. It is automatically "
                + "set by gradle. If you must set it yourself then it should be the absolute path to the "
                + "log file.");
        }
        return Paths.get(logFileString);
    }
}

@@ -32,7 +32,7 @@ task writeJavaPolicy {
     javaPolicy.write(
         [
             "grant {",
-            " permission java.io.FilePermission \"${-> followClusterTest.getNodes().get(0).homeDir}/logs/${-> followClusterTest.getNodes().get(0).clusterName}.log\", \"read\";",
+            " permission java.io.FilePermission \"${-> followClusterTest.getNodes().get(0).homeDir}/logs/${-> followClusterTest.getNodes().get(0).clusterName}_server.json\", \"read\";",
             "};"
         ].join("\n"))
 }

@@ -54,7 +54,8 @@ followClusterTestRunner {
     systemProperty 'java.security.policy', "file://${buildDir}/tmp/java.policy"
     systemProperty 'tests.target_cluster', 'follow'
     systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}"
-    systemProperty 'log', "${-> followClusterTest.getNodes().get(0).homeDir}/logs/${-> followClusterTest.getNodes().get(0).clusterName}.log"
+    systemProperty 'log', "${-> followClusterTest.getNodes().get(0).homeDir}/logs/" +
+        "${-> followClusterTest.getNodes().get(0).clusterName}_server.json"
     finalizedBy 'leaderClusterTestCluster#stop'
 }

@@ -11,13 +11,17 @@ import org.elasticsearch.client.ResponseException;
 import org.elasticsearch.client.RestClient;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.logging.JsonLogLine;
+import org.elasticsearch.common.logging.JsonLogsStream;
 import org.elasticsearch.common.settings.Settings;
+import org.hamcrest.FeatureMatcher;
+import org.hamcrest.Matcher;
+import org.hamcrest.Matchers;

 import java.io.IOException;
-import java.nio.file.Files;
+import java.nio.file.Path;
-import java.util.Iterator;
-import java.util.List;
 import java.util.Map;
+import java.util.stream.Stream;

 import static org.elasticsearch.common.xcontent.ObjectPath.eval;
 import static org.hamcrest.Matchers.containsString;

@@ -80,25 +84,10 @@ public class FollowIndexIT extends ESCCRRestTestCase {
             // (does not work on windows...)
             if (Constants.WINDOWS == false) {
                 assertBusy(() -> {
-                    final List<String> lines = Files.readAllLines(PathUtils.get(System.getProperty("log")));
-                    final Iterator<String> it = lines.iterator();
-                    boolean warn = false;
-                    while (it.hasNext()) {
-                        final String line = it.next();
-                        if (line.matches(".*\\[WARN\\s*\\]\\[o\\.e\\.x\\.c\\.a\\.AutoFollowCoordinator\\s*\\] \\[node-0\\] " +
-                            "failure occurred while fetching cluster state for auto follow pattern \\[test_pattern\\]")) {
-                            warn = true;
-                            break;
-                        }
-                    }
-                    assertTrue(warn);
-                    assertTrue(it.hasNext());
-                    final String lineAfterWarn = it.next();
-                    assertThat(
-                        lineAfterWarn,
-                        equalTo("org.elasticsearch.ElasticsearchStatusException: " +
-                            "can not fetch remote cluster state as the remote cluster [leader_cluster] is not licensed for [ccr]; " +
-                            "the license mode [BASIC] on cluster [leader_cluster] does not enable [ccr]"));
+                    Path path = PathUtils.get(System.getProperty("log"));
+                    try (Stream<JsonLogLine> stream = JsonLogsStream.from(path)) {
+                        assertTrue(stream.anyMatch(autoFollowCoordinatorWarn()::matches));
+                    }
                 });
             }
         });

@@ -108,10 +97,26 @@ public class FollowIndexIT extends ESCCRRestTestCase {
         assertThat(e.getMessage(), containsString("the license mode [BASIC] on cluster [leader_cluster] does not enable [ccr]"));
     }

+    private Matcher<JsonLogLine> autoFollowCoordinatorWarn() {
+        return new FeatureMatcher<JsonLogLine, Boolean>(Matchers.is(true), "autoFollowCoordinatorWarn", "autoFollowCoordinatorWarn") {
+
+            @Override
+            protected Boolean featureValueOf(JsonLogLine actual) {
+                return actual.level().equals("WARN") &&
+                    actual.component().equals("o.e.x.c.a.AutoFollowCoordinator") &&
+                    actual.nodeName().equals("node-0") &&
+                    actual.message().contains("failure occurred while fetching cluster state for auto follow pattern [test_pattern]") &&
+                    actual.stacktrace().contains("org.elasticsearch.ElasticsearchStatusException: can not fetch remote cluster state " +
+                        "as the remote cluster [leader_cluster] is not licensed for [ccr]; the license mode [BASIC]" +
+                        " on cluster [leader_cluster] does not enable [ccr]");
+            }
+        };
+    }
+
     private void createNewIndexAndIndexDocs(RestClient client, String index) throws IOException {
         Settings settings = Settings.builder()
             .put("index.soft_deletes.enabled", true)
             .build();
         Request request = new Request("PUT", "/" + index);
         request.setJsonEntity("{\"settings\": " + Strings.toString(settings) +
             ", \"mappings\": {\"properties\": {\"field\": {\"type\": \"keyword\"}}}}");