Complete Elasticsearch logger names

This commit modifies the logger names within Elasticsearch to be the
fully-qualified class name, as opposed to removing the org.elasticsearch
prefix and dropping the class name. This change separates the root
logger from the Elasticsearch loggers (previously they were equated by
the removal of the org.elasticsearch prefix) and enables log levels to
be set at the class level (instead of only the package level).

Relates #20457
Jason Tedor authored 2016-09-13 22:46:54 -04:00, committed by GitHub
parent 0eff7daf5b
commit 7560101ec7
20 changed files with 113 additions and 124 deletions
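
To see what the renaming enables, consider setting a level for a single class. The following is an illustrative sketch against the Log4j 2 API, not code from this commit; the class name used is just an example.

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.core.config.Configurator;

    public class ClassLevelLoggingSketch {
        public static void main(String[] args) {
            // The package stays at INFO while one class inside it runs at TRACE.
            // Under the old naming scheme both names collapsed to the same package logger.
            Configurator.setLevel("org.elasticsearch.index.engine", Level.INFO);
            Configurator.setLevel("org.elasticsearch.index.engine.Engine", Level.TRACE);

            Logger engineLogger = LogManager.getLogger("org.elasticsearch.index.engine.Engine");
            engineLogger.trace("visible: the class-level setting overrides the package level");
        }
    }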

View File

@@ -42,23 +42,25 @@ public final class ESLoggerFactory {
         Property.Dynamic, Property.NodeScope);

     public static Logger getLogger(String prefix, String name) {
-        name = name.intern();
-        final Logger logger = LogManager.getLogger(name);
-        return new PrefixLogger((ExtendedLogger)logger, name, prefix);
+        return getLogger(prefix, LogManager.getLogger(name));
     }
+
+    public static Logger getLogger(String prefix, Class<?> clazz) {
+        return getLogger(prefix, LogManager.getLogger(clazz));
+    }
+
+    public static Logger getLogger(String prefix, Logger logger) {
+        return new PrefixLogger((ExtendedLogger)logger, logger.getName(), prefix);
+    }
+
+    public static Logger getLogger(Class<?> clazz) {
+        return getLogger(null, clazz);
+    }

     public static Logger getLogger(String name) {
         return getLogger(null, name);
     }

     public static DeprecationLogger getDeprecationLogger(String name) {
         return new DeprecationLogger(getLogger(name));
     }

     public static DeprecationLogger getDeprecationLogger(String prefix, String name) {
         return new DeprecationLogger(getLogger(prefix, name));
     }

     public static Logger getRootLogger() {
         return LogManager.getRootLogger();
     }
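
A usage sketch of the new overloads (the prefix and target class are illustrative, and an Elasticsearch classpath is assumed): passing a Class produces a logger named with the fully-qualified class name, wrapped in a PrefixLogger that prepends the given prefix to each message.

    // Sketch only, not part of this commit.
    Logger shardLogger = ESLoggerFactory.getLogger("[node-1] ", org.elasticsearch.index.shard.IndexShard.class);
    assert shardLogger.getName().equals("org.elasticsearch.index.shard.IndexShard");

    // The Logger-based overload re-wraps an existing logger with a prefix.
    Logger wrapped = ESLoggerFactory.getLogger("[node-1] ", LogManager.getLogger("org.elasticsearch.cluster.service"));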

View File

@@ -48,24 +48,8 @@ import static org.elasticsearch.common.util.CollectionUtils.asArrayList;
  */
 public class Loggers {

-    static final String commonPrefix = System.getProperty("es.logger.prefix", "org.elasticsearch.");
-
     public static final String SPACE = " ";

-    private static boolean consoleLoggingEnabled = true;
-
-    public static void disableConsoleLogging() {
-        consoleLoggingEnabled = false;
-    }
-
-    public static void enableConsoleLogging() {
-        consoleLoggingEnabled = true;
-    }
-
-    public static boolean consoleLoggingEnabled() {
-        return consoleLoggingEnabled;
-    }
-
     public static Logger getLogger(Class<?> clazz, Settings settings, ShardId shardId, String... prefixes) {
         return getLogger(clazz, settings, shardId.getIndex(), asArrayList(Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
     }
@@ -84,10 +68,16 @@ public class Loggers {
     }

     public static Logger getLogger(Class<?> clazz, Settings settings, String... prefixes) {
-        return getLogger(buildClassLoggerName(clazz), settings, prefixes);
+        final List<String> prefixesList = prefixesList(settings, prefixes);
+        return getLogger(clazz, prefixesList.toArray(new String[prefixesList.size()]));
     }

     public static Logger getLogger(String loggerName, Settings settings, String... prefixes) {
+        final List<String> prefixesList = prefixesList(settings, prefixes);
+        return getLogger(loggerName, prefixesList.toArray(new String[prefixesList.size()]));
+    }
+
+    private static List<String> prefixesList(Settings settings, String... prefixes) {
         List<String> prefixesList = new ArrayList<>();
         if (Node.NODE_NAME_SETTING.exists(settings)) {
             prefixesList.add(Node.NODE_NAME_SETTING.get(settings));
@@ -95,27 +85,31 @@ public class Loggers {
         if (prefixes != null && prefixes.length > 0) {
             prefixesList.addAll(asList(prefixes));
         }
-        return getLogger(getLoggerName(loggerName), prefixesList.toArray(new String[prefixesList.size()]));
+        return prefixesList;
     }

     public static Logger getLogger(Logger parentLogger, String s) {
         assert parentLogger instanceof PrefixLogger;
-        return ESLoggerFactory.getLogger(((PrefixLogger)parentLogger).prefix(), getLoggerName(parentLogger.getName() + s));
+        return ESLoggerFactory.getLogger(((PrefixLogger)parentLogger).prefix(), parentLogger.getName() + s);
     }

     public static Logger getLogger(String s) {
-        return ESLoggerFactory.getLogger(getLoggerName(s));
+        return ESLoggerFactory.getLogger(s);
     }

     public static Logger getLogger(Class<?> clazz) {
-        return ESLoggerFactory.getLogger(getLoggerName(buildClassLoggerName(clazz)));
+        return ESLoggerFactory.getLogger(clazz);
     }

     public static Logger getLogger(Class<?> clazz, String... prefixes) {
-        return getLogger(buildClassLoggerName(clazz), prefixes);
+        return ESLoggerFactory.getLogger(formatPrefix(prefixes), clazz);
     }

     public static Logger getLogger(String name, String... prefixes) {
+        return ESLoggerFactory.getLogger(formatPrefix(prefixes), name);
+    }
+
+    private static String formatPrefix(String... prefixes) {
         String prefix = null;
         if (prefixes != null && prefixes.length > 0) {
             StringBuilder sb = new StringBuilder();
@@ -133,7 +127,7 @@ public class Loggers {
                 prefix = sb.toString();
             }
         }
-        return ESLoggerFactory.getLogger(prefix, getLoggerName(name));
+        return prefix;
     }

@@ -170,21 +164,6 @@ public class Loggers {
         }
     }

-    private static String buildClassLoggerName(Class<?> clazz) {
-        String name = clazz.getName();
-        if (name.startsWith("org.elasticsearch.")) {
-            name = Classes.getPackageName(clazz);
-        }
-        return name;
-    }
-
-    private static String getLoggerName(String name) {
-        if (name.startsWith("org.elasticsearch.")) {
-            name = name.substring("org.elasticsearch.".length());
-        }
-        return commonPrefix + name;
-    }
-
     public static void addAppender(final Logger logger, final Appender appender) {
         final LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
         final Configuration config = ctx.getConfiguration();
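
The observable effect of deleting buildClassLoggerName and getLoggerName is easiest to see side by side; a sketch (the class is chosen only for illustration):

    // Before: with the default es.logger.prefix, the class name was dropped, so the
    // logger for IndexShard was the package-level "org.elasticsearch.index.shard".
    // After: the name is the fully-qualified class name.
    Logger logger = Loggers.getLogger(org.elasticsearch.index.shard.IndexShard.class);
    assert logger.getName().equals("org.elasticsearch.index.shard.IndexShard");

    // String names now pass through untouched as well.
    assert Loggers.getLogger("org.elasticsearch.gateway").getName().equals("org.elasticsearch.gateway");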

View File

@@ -25,19 +25,12 @@ import org.elasticsearch.common.logging.Loggers;
 /** An InfoStream (for Lucene's IndexWriter) that redirects
  *  messages to "lucene.iw.ifd" and "lucene.iw" Logger.trace. */
 public final class LoggerInfoStream extends InfoStream {
     /** Used for component-specific logging: */
-    /** Logger for everything */
-    private final Logger logger;
+    private final Logger parentLogger;

-    /** Logger for IndexFileDeleter */
-    private final Logger ifdLogger;
-
-    public LoggerInfoStream(Logger parentLogger) {
-        logger = Loggers.getLogger(parentLogger, ".lucene.iw");
-        ifdLogger = Loggers.getLogger(parentLogger, ".lucene.iw.ifd");
+    public LoggerInfoStream(final Logger parentLogger) {
+        this.parentLogger = parentLogger;
     }

     @Override
@@ -53,14 +46,11 @@ public final class LoggerInfoStream extends InfoStream {
     }

     private Logger getLogger(String component) {
-        if (component.equals("IFD")) {
-            return ifdLogger;
-        } else {
-            return logger;
-        }
+        return Loggers.getLogger(parentLogger, "." + component);
     }

     @Override
     public void close() {
     }
 }
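
With the fixed fields gone, every InfoStream component now maps onto a child of the parent logger by its own name. A sketch, with the parent logger name assumed:

    // A parent logger named "org.elasticsearch.index.engine.Engine" yields:
    //   component "IW"  -> logger "org.elasticsearch.index.engine.Engine.IW"
    //   component "IFD" -> logger "org.elasticsearch.index.engine.Engine.IFD"
    Logger parent = Loggers.getLogger("org.elasticsearch.index.engine.Engine");
    LoggerInfoStream infoStream = new LoggerInfoStream(parent);

This is why the InternalEngineTests assertions further down match on the ".IW" and ".IFD" suffixes instead of the old "lucene.iw" and "lucene.iw.ifd" names.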

View File

@@ -60,7 +60,7 @@ import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.nullValue;

 @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST)
-@TestLogging("_root:DEBUG,action.admin.indices.shards:TRACE,cluster.service:TRACE")
+@TestLogging("_root:DEBUG,org.elasticsearch.action.admin.indices.shards:TRACE,org.elasticsearch.cluster.service:TRACE")
 public class IndicesShardStoreRequestIT extends ESIntegTestCase {
     @Override

View File

@@ -47,7 +47,7 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo;
  */
 @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST)
 // this test sometimes fails in recovery when the recovery is reset, increasing the logging level to help debug
-@TestLogging("indices.recovery:DEBUG")
+@TestLogging("org.elasticsearch.indices.recovery:DEBUG")
 public class RepositoryUpgradabilityIT extends AbstractSnapshotIntegTestCase {
     /**

View File

@@ -65,7 +65,7 @@ import static org.hamcrest.Matchers.nullValue;
 @ClusterScope(scope = Scope.TEST, numDataNodes = 0)
 @ESIntegTestCase.SuppressLocalMode
-@TestLogging("_root:DEBUG,cluster.service:TRACE,discovery.zen:TRACE")
+@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.discovery.zen:TRACE")
 public class MinimumMasterNodesIT extends ESIntegTestCase {
     @Override

View File

@@ -363,7 +363,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
         assertThat(processedLatch.await(1, TimeUnit.SECONDS), equalTo(true));
     }

-    @TestLogging("_root:debug,action.admin.cluster.tasks:trace")
+    @TestLogging("_root:debug,org.elasticsearch.action.admin.cluster.tasks:trace")
     public void testPendingUpdateTask() throws Exception {
         Settings settings = Settings.builder()
             .put("discovery.type", "local")

View File

@@ -677,18 +677,30 @@ public class ClusterServiceTests extends ESTestCase {
         latch.await();
     }

-    @TestLogging("cluster:TRACE") // To ensure that we log cluster state events on TRACE level
+    @TestLogging("org.elasticsearch.cluster.service:TRACE") // To ensure that we log cluster state events on TRACE level
     public void testClusterStateUpdateLogging() throws Exception {
         MockLogAppender mockAppender = new MockLogAppender();
-        mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test1", "cluster.service", Level.DEBUG,
-                "*processing [test1]: took [1s] no change in cluster_state"));
-        mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.TRACE,
-                "*failed to execute cluster state update in [2s]*"));
-        mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.DEBUG,
-                "*processing [test3]: took [3s] done applying updated cluster_state (version: *, uuid: *)"));
+        mockAppender.addExpectation(
+                new MockLogAppender.SeenEventExpectation(
+                        "test1",
+                        "org.elasticsearch.cluster.service.ClusterServiceTests$TimedClusterService",
+                        Level.DEBUG,
+                        "*processing [test1]: took [1s] no change in cluster_state"));
+        mockAppender.addExpectation(
+                new MockLogAppender.SeenEventExpectation(
+                        "test2",
+                        "org.elasticsearch.cluster.service.ClusterServiceTests$TimedClusterService",
+                        Level.TRACE,
+                        "*failed to execute cluster state update in [2s]*"));
+        mockAppender.addExpectation(
+                new MockLogAppender.SeenEventExpectation(
+                        "test3",
+                        "org.elasticsearch.cluster.service.ClusterServiceTests$TimedClusterService",
+                        Level.DEBUG,
+                        "*processing [test3]: took [3s] done applying updated cluster_state (version: *, uuid: *)"));

-        Logger rootLogger = LogManager.getRootLogger();
-        Loggers.addAppender(rootLogger, mockAppender);
+        Logger clusterLogger = Loggers.getLogger("org.elasticsearch.cluster.service");
+        Loggers.addAppender(clusterLogger, mockAppender);
         try {
             final CountDownLatch latch = new CountDownLatch(4);
             clusterService.currentTimeOverride = System.nanoTime();
@@ -743,7 +755,7 @@ public class ClusterServiceTests extends ESTestCase {
                     fail();
                 }
             });
-            // Additional update task to make sure all previous logging made it to the logger
+            // Additional update task to make sure all previous logging made it to the loggerName
             // We don't check logging for this on since there is no guarantee that it will occur before our check
             clusterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() {
                 @Override
@@ -763,25 +775,41 @@ public class ClusterServiceTests extends ESTestCase {
             });
             latch.await();
         } finally {
-            Loggers.removeAppender(rootLogger, mockAppender);
+            Loggers.removeAppender(clusterLogger, mockAppender);
         }
         mockAppender.assertAllExpectationsMatched();
     }

-    @TestLogging("cluster:WARN") // To ensure that we log cluster state events on WARN level
+    @TestLogging("org.elasticsearch.cluster.service:WARN") // To ensure that we log cluster state events on WARN level
     public void testLongClusterStateUpdateLogging() throws Exception {
         MockLogAppender mockAppender = new MockLogAppender();
-        mockAppender.addExpectation(new MockLogAppender.UnseenEventExpectation("test1 shouldn't see because setting is too low",
-                "cluster.service", Level.WARN, "*cluster state update task [test1] took [*] above the warn threshold of *"));
-        mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.WARN,
-                "*cluster state update task [test2] took [32s] above the warn threshold of *"));
-        mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.WARN,
-                "*cluster state update task [test3] took [33s] above the warn threshold of *"));
-        mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test4", "cluster.service", Level.WARN,
-                "*cluster state update task [test4] took [34s] above the warn threshold of *"));
+        mockAppender.addExpectation(
+                new MockLogAppender.UnseenEventExpectation(
+                        "test1 shouldn't see because setting is too low",
+                        "org.elasticsearch.cluster.service.ClusterServiceTests$TimedClusterService",
+                        Level.WARN,
+                        "*cluster state update task [test1] took [*] above the warn threshold of *"));
+        mockAppender.addExpectation(
+                new MockLogAppender.SeenEventExpectation(
+                        "test2",
+                        "org.elasticsearch.cluster.service.ClusterServiceTests$TimedClusterService",
+                        Level.WARN,
+                        "*cluster state update task [test2] took [32s] above the warn threshold of *"));
+        mockAppender.addExpectation(
+                new MockLogAppender.SeenEventExpectation(
+                        "test3",
+                        "org.elasticsearch.cluster.service.ClusterServiceTests$TimedClusterService",
+                        Level.WARN,
+                        "*cluster state update task [test3] took [33s] above the warn threshold of *"));
+        mockAppender.addExpectation(
+                new MockLogAppender.SeenEventExpectation(
+                        "test4",
+                        "org.elasticsearch.cluster.service.ClusterServiceTests$TimedClusterService",
+                        Level.WARN,
+                        "*cluster state update task [test4] took [34s] above the warn threshold of *"));

-        Logger rootLogger = LogManager.getRootLogger();
-        Loggers.addAppender(rootLogger, mockAppender);
+        Logger clusterLogger = Loggers.getLogger("org.elasticsearch.cluster.service");
+        Loggers.addAppender(clusterLogger, mockAppender);
         try {
             final CountDownLatch latch = new CountDownLatch(5);
             final CountDownLatch processedFirstTask = new CountDownLatch(1);
@@ -857,7 +885,7 @@ public class ClusterServiceTests extends ESTestCase {
                     fail();
                 }
             });
-            // Additional update task to make sure all previous logging made it to the logger
+            // Additional update task to make sure all previous logging made it to the loggerName
             // We don't check logging for this on since there is no guarantee that it will occur before our check
             clusterService.submitStateUpdateTask("test5", new ClusterStateUpdateTask() {
                 @Override
@@ -877,7 +905,7 @@ public class ClusterServiceTests extends ESTestCase {
             });
             latch.await();
         } finally {
-            Loggers.removeAppender(rootLogger, mockAppender);
+            Loggers.removeAppender(clusterLogger, mockAppender);
         }
         mockAppender.assertAllExpectationsMatched();
     }

View File

@@ -118,7 +118,7 @@ import static org.hamcrest.Matchers.nullValue;
 @ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0)
 @ESIntegTestCase.SuppressLocalMode
-@TestLogging("_root:DEBUG,cluster.service:TRACE")
+@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE")
 public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {

     private static final TimeValue DISRUPTION_HEALING_OVERHEAD = TimeValue.timeValueSeconds(40); // we use 30s as timeout in many places.
@@ -384,7 +384,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
      * This test isolates the master from rest of the cluster, waits for a new master to be elected, restores the partition
      * and verifies that all node agree on the new cluster state
      */
-    @TestLogging("_root:DEBUG,cluster.service:TRACE,gateway:TRACE,indices.store:TRACE")
+    @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.gateway:TRACE,org.elasticsearch.indices.store:TRACE")
     public void testIsolateMasterAndVerifyClusterStateConsensus() throws Exception {
         final List<String> nodes = startCluster(3);
@@ -454,8 +454,8 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
      * <p>
      * This test is a superset of tests run in the Jepsen test suite, with the exception of versioned updates
      */
-    @TestLogging("_root:DEBUG,action.index:TRACE,action.get:TRACE,discovery:TRACE,cluster.service:TRACE,"
-        + "indices.recovery:TRACE,indices.cluster:TRACE")
+    @TestLogging("_root:DEBUG,org.elasticsearch.action.index:TRACE,org.elasticsearch.action.get:TRACE,discovery:TRACE,org.elasticsearch.cluster.service:TRACE,"
+        + "org.elasticsearch.indices.recovery:TRACE,org.elasticsearch.indices.cluster:TRACE")
     public void testAckedIndexing() throws Exception {

         final int seconds = !(TEST_NIGHTLY && rarely()) ? 1 : 5;
@@ -636,7 +636,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
      * that already are following another elected master node. These nodes should reject this cluster state and prevent
      * them from following the stale master.
      */
-    @TestLogging("_root:DEBUG,cluster.service:TRACE,test.disruption:TRACE")
+    @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.test.disruption:TRACE")
     public void testStaleMasterNotHijackingMajority() throws Exception {
         // 3 node cluster with unicast discovery and minimum_master_nodes set to 2:
         final List<String> nodes = startCluster(3, 2);

View File

@@ -91,7 +91,7 @@ import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.instanceOf;

-@TestLogging("discovery.zen:TRACE")
+@TestLogging("org.elasticsearch.discovery.zen:TRACE")
 public class NodeJoinControllerTests extends ESTestCase {

     private static ThreadPool threadPool;

View File

@@ -82,7 +82,7 @@ import static org.hamcrest.Matchers.not;
 import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.nullValue;

-@TestLogging("discovery.zen.publish:TRACE")
+@TestLogging("org.elasticsearch.discovery.zen.publish:TRACE")
 public class PublishClusterStateActionTests extends ESTestCase {

     private static final ClusterName CLUSTER_NAME = ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY);

View File

@@ -184,7 +184,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
     }

-    @TestLogging("gateway:TRACE")
+    @TestLogging("org.elasticsearch.gateway:TRACE")
     public void testIndexWithFewDocuments() throws Exception {
         final Path dataPath = createTempDir();
         Settings nodeSettings = nodeSettings(dataPath);
@@ -599,7 +599,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
     // deleting the index and hence, deleting all the shard data for the index, the test
     // failure still showed some Lucene files in the data directory for that index. Not sure
     // why that is, so turning on more logging here.
-    @TestLogging("indices:TRACE,env:TRACE")
+    @TestLogging("org.elasticsearch.indices:TRACE,org.elasticsearch.env:TRACE")
     public void testShadowReplicaNaturalRelocation() throws Exception {
         Path dataPath = createTempDir();
         Settings nodeSettings = nodeSettings(dataPath);

View File

@@ -1516,11 +1516,11 @@ public class InternalEngineTests extends ESTestCase {
         public void append(LogEvent event) {
             final String formattedMessage = event.getMessage().getFormattedMessage();
             if (event.getLevel() == Level.TRACE && event.getMarker().getName().contains("[index][1] ")) {
-                if (event.getLoggerName().endsWith("lucene.iw") &&
+                if (event.getLoggerName().endsWith(".IW") &&
                     formattedMessage.contains("IW: apply all deletes during flush")) {
                     sawIndexWriterMessage = true;
                 }
-                if (event.getLoggerName().endsWith("lucene.iw.ifd")) {
+                if (event.getLoggerName().endsWith(".IFD")) {
                     sawIndexWriterIFDMessage = true;
                 }
             }
@@ -1564,16 +1564,7 @@ public class InternalEngineTests extends ESTestCase {
         assumeFalse("who tests the tester?", VERBOSE);
         MockAppender mockAppender = new MockAppender("testIndexWriterIFDInfoStream");

-        final Logger iwIFDLogger;
-        if (LogManager.getContext(false).hasLogger("org.elasticsearch.index.engine.lucene.iw.ifd")) {
-            // Works when running this test inside Intellij:
-            iwIFDLogger = LogManager.getLogger("org.elasticsearch.index.engine.lucene.iw.ifd");
-            assertNotNull(iwIFDLogger);
-        } else {
-            // Works when running this test from command line:
-            assertTrue(LogManager.getContext(false).hasLogger("index.engine.lucene.iw.ifd"));
-            iwIFDLogger = LogManager.getLogger("index.engine.lucene.iw.ifd");
-        }
+        final Logger iwIFDLogger = Loggers.getLogger("org.elasticsearch.index.engine.Engine.IFD");

         Loggers.addAppender(iwIFDLogger, mockAppender);
         Loggers.setLevel(iwIFDLogger, Level.DEBUG);

View File

@@ -473,7 +473,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
      * TODO once checksum verification on snapshotting is implemented this test needs to be fixed or split into several
      * parts... We should also corrupt files on the actual snapshot and check that we don't restore the corrupted shard.
      */
-    @TestLogging("monitor.fs:DEBUG")
+    @TestLogging("org.elasticsearch.monitor.fs:DEBUG")
     public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, InterruptedException, IOException {
         int numDocs = scaledRandomIntBetween(100, 1000);
         internalCluster().ensureAtLeastNumDataNodes(2);

View File

@@ -44,7 +44,7 @@ public class IndexPrimaryRelocationIT extends ESIntegTestCase {

     private static final int RELOCATION_COUNT = 25;

-    @TestLogging("_root:DEBUG,action.delete:TRACE,action.index:TRACE,index.shard:TRACE,cluster.service:TRACE")
+    @TestLogging("_root:DEBUG,org.elasticsearch.action.delete:TRACE,org.elasticsearch.action.index:TRACE,index.shard:TRACE,org.elasticsearch.cluster.service:TRACE")
     public void testPrimaryRelocationWhileIndexing() throws Exception {
         internalCluster().ensureAtLeastNumDataNodes(randomIntBetween(2, 3));
         client().admin().indices().prepareCreate("test")

View File

@@ -53,7 +53,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllS
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout;

-@TestLogging("_root:DEBUG,index.shard:TRACE")
+@TestLogging("_root:DEBUG,org.elasticsearch.index.shard:TRACE")
 public class RecoveryWhileUnderLoadIT extends ESIntegTestCase {

     private final Logger logger = Loggers.getLogger(RecoveryWhileUnderLoadIT.class);

View File

@@ -90,7 +90,7 @@ import static org.hamcrest.Matchers.startsWith;
 /**
  */
 @ClusterScope(scope = Scope.TEST, numDataNodes = 0)
-@TestLogging("_root:DEBUG,indices.recovery:TRACE,index.shard.service:TRACE")
+@TestLogging("_root:DEBUG,org.elasticsearch.indices.recovery:TRACE,org.elasticsearch.index.shard.service:TRACE")
 public class RelocationIT extends ESIntegTestCase {

     private final TimeValue ACCEPTABLE_RELOCATION_TIME = new TimeValue(5, TimeUnit.MINUTES);

View File

@@ -1,19 +1,19 @@
 status = error

 # log action execution errors for easier debugging
-logger.action.name = action
+logger.action.name = org.elasticsearch.action
 logger.action.level = debug

 appender.console.type = Console
 appender.console.name = console
 appender.console.layout.type = PatternLayout
-appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%m%n
+appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n

 appender.rolling.type = RollingFile
 appender.rolling.name = rolling
 appender.rolling.fileName = ${sys:es.logs}.log
 appender.rolling.layout.type = PatternLayout
-appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n
+appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n
 appender.rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log
 appender.rolling.policies.type = Policies
 appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
@@ -28,7 +28,7 @@ appender.deprecation_rolling.type = RollingFile
 appender.deprecation_rolling.name = deprecation_rolling
 appender.deprecation_rolling.fileName = ${sys:es.logs}_deprecation.log
 appender.deprecation_rolling.layout.type = PatternLayout
-appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n
+appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n
 appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%i.log.gz
 appender.deprecation_rolling.policies.type = Policies
 appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
@@ -36,7 +36,7 @@ appender.deprecation_rolling.policies.size.size = 1GB
 appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
 appender.deprecation_rolling.strategy.max = 4

-logger.deprecation.name = deprecation
+logger.deprecation.name = org.elasticsearch.deprecation
 logger.deprecation.level = warn
 logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
 logger.deprecation.additivity = false
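
Two notes on this configuration. First, the new {1.} conversion-pattern precision abbreviates every logger-name component except the rightmost to one character, so org.elasticsearch.index.shard.IndexShard renders as o.e.i.s.IndexShard and log columns stay narrow despite the fully-qualified names. Second, because names are now fully qualified, a logger entry can target a single class; a hypothetical addition to this file (not part of the commit):

    # hypothetical: trace one class without tracing its whole package
    logger.index_shard.name = org.elasticsearch.index.shard.IndexShard
    logger.index_shard.level = trace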

View File

@@ -32,7 +32,6 @@ import static java.lang.annotation.ElementType.TYPE;
  * It supports multiple logger:level comma separated key value pairs
  * Use the _root keyword to set the root logger level
  * e.g. @TestLogging("_root:DEBUG,org.elasticsearch.cluster.metadata:TRACE")
- * or just @TestLogging("_root:DEBUG,cluster.metadata:TRACE") since we start the test with -Des.logger.prefix=
  */
 @Retention(RetentionPolicy.RUNTIME)
 @Target({PACKAGE, TYPE, METHOD})
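
With the es.logger.prefix shorthand removed, annotations have to spell out the full logger names. A sketch of the post-change format (the test class and method are hypothetical):

    @TestLogging("_root:DEBUG,org.elasticsearch.indices.recovery:TRACE")
    public class ExampleRecoveryIT extends ESIntegTestCase {

        // A method-level annotation can now narrow a level to a single class.
        @TestLogging("org.elasticsearch.index.shard.IndexShard:TRACE")
        public void testExample() {
        }
    }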

View File

@@ -791,7 +791,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
         assertTrue(inFlight.tryAcquire(Integer.MAX_VALUE, 10, TimeUnit.SECONDS));
     }

-    @TestLogging(value = "test.transport.tracer:TRACE")
+    @TestLogging(value = "org.elasticsearch.test.transport.tracer:TRACE")
     public void testTracerLog() throws InterruptedException {
         TransportRequestHandler handler = new TransportRequestHandler<StringMessageRequest>() {
             @Override