Merge pull request #19126 from ywelsch/fix/reenable-loggerusagechecker
Reenable logger usage checker
commit 8b62f99104
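The change re-enables the build-time logger usage checker and rewrites log calls from string concatenation to the parameterized form the checker can verify: a constant format string with "{}" place holders first, the Throwable (if any) second, then exactly one argument per place holder. A minimal before/after sketch of the convention, using illustrative variable names (shardId, request, caughtException) rather than any specific call site in this diff:

    // Old style: the message is assembled by concatenation, so the checker cannot
    // statically match arguments to place holders.
    logger.debug(shardId + ": Failed to execute [" + request + "]", caughtException);

    // New style: constant message with "{}" place holders, the Throwable before the
    // varargs, and one argument per place holder.
    logger.debug("{}: Failed to execute [{}]", caughtException, shardId, request);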
@@ -190,7 +190,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
                 innerMoveToSecondPhase();
             } catch (Throwable e) {
                 if (logger.isDebugEnabled()) {
-                    logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "] while moving to second phase", e);
+                    logger.debug("{}: Failed to execute [{}] while moving to second phase", e, shardIt.shardId(), request);
                 }
                 raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures()));
             }
@@ -210,11 +210,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
         if (totalOps.incrementAndGet() == expectedTotalOps) {
             if (logger.isDebugEnabled()) {
                 if (t != null && !TransportActions.isShardNotAvailableException(t)) {
-                    if (shard != null) {
-                        logger.debug(shard.shortSummary() + ": Failed to execute [" + request + "]", t);
-                    } else {
-                        logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "]", t);
-                    }
+                    logger.debug("{}: Failed to execute [{}]", t, shard != null ? shard.shortSummary() : shardIt.shardId(), request);
                 } else if (logger.isTraceEnabled()) {
                     logger.trace("{}: Failed to execute [{}]", t, shard, request);
                 }
@@ -239,7 +235,8 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
             final boolean lastShard = nextShard == null;
             // trace log this exception
             if (logger.isTraceEnabled()) {
-                logger.trace(executionFailureMsg(shard, shardIt, request, lastShard), t);
+                logger.trace("{}: Failed to execute [{}] lastShard [{}]", t, shard != null ? shard.shortSummary() : shardIt.shardId(),
+                    request, lastShard);
             }
             if (!lastShard) {
                 try {
@@ -251,22 +248,14 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
                 // no more shards active, add a failure
                 if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception
                     if (t != null && !TransportActions.isShardNotAvailableException(t)) {
-                        logger.debug(executionFailureMsg(shard, shardIt, request, lastShard), t);
+                        logger.debug("{}: Failed to execute [{}] lastShard [{}]", t,
+                            shard != null ? shard.shortSummary() : shardIt.shardId(), request, lastShard);
                     }
                 }
             }
         }
     }
 
-    private String executionFailureMsg(@Nullable ShardRouting shard, final ShardIterator shardIt, SearchRequest request,
-                                       boolean lastShard) {
-        if (shard != null) {
-            return shard.shortSummary() + ": Failed to execute [" + request + "] lastShard [" + lastShard + "]";
-        } else {
-            return shardIt.shardId() + ": Failed to execute [" + request + "] lastShard [" + lastShard + "]";
-        }
-    }
-
     protected final ShardSearchFailure[] buildShardFailures() {
         AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures;
         if (shardFailures == null) {
@@ -103,7 +103,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
                 executePhase(i, node, target.getScrollId());
             } else {
                 if (logger.isDebugEnabled()) {
-                    logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
+                    logger.debug("Node [{}] not available for scroll request [{}]", target.getNode(), scrollId.getSource());
                 }
                 successfulOps.decrementAndGet();
                 if (counter.decrementAndGet() == 0) {
@@ -116,7 +116,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
             DiscoveryNode node = nodes.get(target.getNode());
             if (node == null) {
                 if (logger.isDebugEnabled()) {
-                    logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
+                    logger.debug("Node [{}] not available for scroll request [{}]", target.getNode(), scrollId.getSource());
                 }
                 successfulOps.decrementAndGet();
                 if (counter.decrementAndGet() == 0) {
@@ -107,7 +107,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
                 executeQueryPhase(i, counter, node, target.getScrollId());
             } else {
                 if (logger.isDebugEnabled()) {
-                    logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
+                    logger.debug("Node [{}] not available for scroll request [{}]", target.getNode(), scrollId.getSource());
                 }
                 successfulOps.decrementAndGet();
                 if (counter.decrementAndGet() == 0) {
@@ -91,7 +91,7 @@ public class NodeConnectionsService extends AbstractLifecycleComponent<NodeConne
                 try {
                     transportService.disconnectFromNode(node);
                 } catch (Throwable e) {
-                    logger.warn("failed to disconnect to node [" + node + "]", e);
+                    logger.warn("failed to disconnect to node [{}]", e, node);
                 }
             }
         }
@@ -21,12 +21,14 @@ package org.elasticsearch.common.logging;
 
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
+import org.elasticsearch.common.SuppressLoggerChecks;
 
 import static org.elasticsearch.common.logging.LoggerMessageFormat.format;
 
 /**
  * Elasticsearch's logger wrapper.
  */
+@SuppressLoggerChecks(reason = "safely delegates to itself")
 public class ESLogger {
     private static final String FQCN = ESLogger.class.getName();
 
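ESLogger forwards whatever format string its caller passes in, so its own delegating methods cannot satisfy the constant-message rule; the @SuppressLoggerChecks annotation added above opts such code out of the check. A small illustrative sketch of the two usages that appear in this diff (the class and method names below are made up; only the annotation and its reason strings are taken from the change):

    import org.elasticsearch.common.SuppressLoggerChecks;

    // Class-level opt-out, as on ESLogger itself, for code that deliberately
    // forwards non-constant messages.
    @SuppressLoggerChecks(reason = "safely delegates to itself")
    public class DelegatingLoggerExample {

        // Method-level opt-out, as on the mock-driven test methods in this diff.
        @SuppressLoggerChecks(reason = "mock usage")
        public void logForwarded(String nonConstantMessage) {
            // a forwarded, non-constant message would otherwise be flagged here
        }
    }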
@@ -94,11 +94,10 @@ public class ElectMasterService extends AbstractComponent {
     public void logMinimumMasterNodesWarningIfNecessary(ClusterState oldState, ClusterState newState) {
         // check if min_master_nodes setting is too low and log warning
         if (hasTooManyMasterNodes(oldState.nodes()) == false && hasTooManyMasterNodes(newState.nodes())) {
-            logger.warn("value for setting \""
-                + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey()
-                + "\" is too low. This can result in data loss! Please set it to at least a quorum of master-eligible nodes "
-                + "(current value: [{}], total number of master-eligible nodes used for publishing in this round: [{}])",
-                minimumMasterNodes(), newState.getNodes().getMasterNodes().size());
+            logger.warn("value for setting \"{}\" is too low. This can result in data loss! Please set it to at least a quorum of master-" +
+                    "eligible nodes (current value: [{}], total number of master-eligible nodes used for publishing in this round: [{}])",
+                ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minimumMasterNodes(),
+                newState.getNodes().getMasterNodes().size());
         }
     }
 
@@ -196,7 +196,7 @@ public class ThreadPool extends AbstractComponent implements Closeable {
             if (executors.containsKey(executorHolder.info.getName())) {
                 throw new IllegalStateException("duplicate executors with name [" + executorHolder.info.getName() + "] registered");
             }
-            logger.debug("created thread pool: " + entry.getValue().formatInfo(executorHolder.info));
+            logger.debug("created thread pool: {}", entry.getValue().formatInfo(executorHolder.info));
             executors.put(entry.getKey(), executorHolder);
         }
 
@@ -20,6 +20,7 @@
 package org.elasticsearch.bootstrap;
 
 import org.apache.lucene.util.Constants;
+import org.elasticsearch.common.SuppressLoggerChecks;
 import org.elasticsearch.common.io.PathUtils;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.test.ESTestCase;
@@ -46,6 +47,7 @@ public class MaxMapCountCheckTests extends ESTestCase {
         }
     }
 
+    @SuppressLoggerChecks(reason = "mock usage")
     public void testGetMaxMapCount() throws IOException {
         final long procSysVmMaxMapCount = randomIntBetween(1, Integer.MAX_VALUE);
         final BufferedReader reader = mock(BufferedReader.class);
@@ -65,7 +65,7 @@ public class PrimaryTermsTests extends ESAllocationTestCase {
                 .build());
         this.numberOfShards = randomIntBetween(1, 5);
         this.numberOfReplicas = randomIntBetween(1, 5);
-        logger.info("Setup test with " + this.numberOfShards + " shards and " + this.numberOfReplicas + " replicas.");
+        logger.info("Setup test with {} shards and {} replicas.", this.numberOfShards, this.numberOfReplicas);
        this.primaryTermsPerIndex.clear();
         MetaData metaData = MetaData.builder()
                 .put(createIndexMetaData(TEST_INDEX_1))
@@ -87,7 +87,7 @@ public class PrimaryTermsTests extends ESAllocationTestCase {
      * puts primary shard routings into initializing state
      */
     private void initPrimaries() {
-        logger.info("adding " + (this.numberOfReplicas + 1) + " nodes and performing rerouting");
+        logger.info("adding {} nodes and performing rerouting", this.numberOfReplicas + 1);
         Builder discoBuilder = DiscoveryNodes.builder();
         for (int i = 0; i < this.numberOfReplicas + 1; i++) {
             discoBuilder = discoBuilder.put(newNode("node" + i));
@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.common.util.concurrent;
 
+import org.elasticsearch.common.SuppressLoggerChecks;
 import org.elasticsearch.common.component.Lifecycle;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.test.ESTestCase;
@@ -67,6 +68,7 @@ public class AbstractLifecycleRunnableTests extends ESTestCase {
         inOrder.verifyNoMoreInteractions();
     }
 
+    @SuppressLoggerChecks(reason = "mock usage")
     public void testDoRunDoesNotRunWhenStoppedOrClosed() throws Exception {
         Callable<?> runCallable = mock(Callable.class);
 
@@ -343,7 +343,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
         } else {
             // test with a shadow replica index
             final Path dataPath = createTempDir();
-            logger.info("--> created temp data path for shadow replicas [" + dataPath + "]");
+            logger.info("--> created temp data path for shadow replicas [{}]", dataPath);
             logger.info("--> starting a cluster with " + numNodes + " nodes");
             final Settings nodeSettings = Settings.builder()
                     .put("node.add_id_to_custom_path", false)
@@ -263,7 +263,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
         assertThat(waitForShardDeletion(node_3, index, 0), equalTo(false));
 
         Path server2Shard = shardDirectory(node_2, index, 0);
-        logger.info("--> stopping node " + node_2);
+        logger.info("--> stopping node {}", node_2);
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node_2));
 
         logger.info("--> running cluster_health");
@@ -479,7 +479,7 @@ public class RelocationIT extends ESIntegTestCase {
         indexRandom(true, docs);
         numDocs *= 2;
 
-        logger.info(" --> waiting for relocation to complete", numDocs);
+        logger.info(" --> waiting for relocation to complete");
         ensureGreen("test");// move all shards to the new node (it waits on relocation)
         final int numIters = randomIntBetween(10, 20);
         for (int i = 0; i < numIters; i++) {
@@ -404,7 +404,7 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
 
             @Override
             public void onFailure(Throwable e) {
-                logger.warn("Failed to clear scroll [" + scrollId + ']', e);
+                logger.warn("Failed to clear scroll [{}]", e, scrollId);
             }
         });
     }
@@ -82,7 +82,7 @@ public class ESLoggerUsageChecker {
         Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
             @Override
             public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
-                if (Files.isRegularFile(file) && file.endsWith(".class")) {
+                if (Files.isRegularFile(file) && file.getFileName().toString().endsWith(".class")) {
                     try (InputStream in = Files.newInputStream(file)) {
                         ESLoggerUsageChecker.check(wrongUsageCallback, in);
                     }
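The visitFile fix matters because java.nio.file.Path.endsWith compares trailing path name elements, not string suffixes, so file.endsWith(".class") only matches a path whose last element is literally named ".class". A small runnable sketch of the difference (the path below is illustrative):

    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class ClassFileSuffixExample {
        public static void main(String[] args) {
            Path file = Paths.get("build", "classes", "Foo.class");

            // Path.endsWith matches whole name elements: ".class" is not "Foo.class"
            System.out.println(file.endsWith(".class"));                          // false

            // Comparing the file name as a string matches the intended suffix
            System.out.println(file.getFileName().toString().endsWith(".class")); // true
        }
    }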
@@ -248,19 +248,6 @@ public class ESLoggerUsageChecker {
                 if (LOGGER_METHODS.contains(methodInsn.name) == false) {
                     continue;
                 }
-                Type[] argumentTypes = Type.getArgumentTypes(methodInsn.desc);
-                BasicValue logMessageLengthObject = getStackValue(stringFrames[i], argumentTypes.length - 1); // first argument
-                if (logMessageLengthObject instanceof PlaceHolderStringBasicValue == false) {
-                    wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
-                        "First argument must be a string constant so that we can statically ensure proper place holder usage"));
-                    continue;
-                }
-                PlaceHolderStringBasicValue logMessageLength = (PlaceHolderStringBasicValue) logMessageLengthObject;
-                if (logMessageLength.minValue != logMessageLength.maxValue) {
-                    wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
-                        "Multiple log messages with conflicting number of place holders"));
-                    continue;
-                }
                 BasicValue varArgsSizeObject = getStackValue(arraySizeFrames[i], 0); // last argument
                 if (varArgsSizeObject instanceof ArraySizeBasicValue == false) {
                     wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
@@ -268,6 +255,24 @@ public class ESLoggerUsageChecker {
                     continue;
                 }
                 ArraySizeBasicValue varArgsSize = (ArraySizeBasicValue) varArgsSizeObject;
+                Type[] argumentTypes = Type.getArgumentTypes(methodInsn.desc);
+                BasicValue logMessageLengthObject = getStackValue(stringFrames[i], argumentTypes.length - 1); // first argument
+                if (logMessageLengthObject instanceof PlaceHolderStringBasicValue == false) {
+                    if (varArgsSize.minValue > 0) {
+                        wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
+                            "First argument must be a string constant so that we can statically ensure proper place holder usage"));
+                        continue;
+                    } else {
+                        // don't check logger usage for logger.warn(someObject) as someObject will be fully logged
+                        continue;
+                    }
+                }
+                PlaceHolderStringBasicValue logMessageLength = (PlaceHolderStringBasicValue) logMessageLengthObject;
+                if (logMessageLength.minValue != logMessageLength.maxValue) {
+                    wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
+                        "Multiple log messages with conflicting number of place holders"));
+                    continue;
+                }
                 if (varArgsSize.minValue != varArgsSize.maxValue) {
                     wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
                         "Multiple parameter arrays with conflicting sizes"));
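Because the varargs array size is now inspected before the message constant-ness, a non-constant message with zero place holder arguments is tolerated, while a non-constant message combined with arguments is still rejected. A hedged summary of how typical calls fare under the reordered checks, modeled on the test methods in this diff (the surrounding method and variable names are illustrative; logger is assumed to be an ESLogger field):

    void loggerUsageExamples(boolean flag, Object shardId, Object request, Exception caughtException) {
        // Accepted: constant message, one argument per "{}" place holder,
        // optionally with a Throwable between the message and the arguments.
        logger.info("Hello {}, {}", "world", 42);
        logger.debug("{}: Failed to execute [{}]", caughtException, shardId, request);

        // Accepted since this change: non-constant message with zero place holder
        // arguments, e.g. logging a pre-built string or an object directly.
        logger.info(Boolean.toString(flag), caughtException);

        // Still flagged: non-constant message combined with place holder arguments,
        // since arguments cannot be matched to place holders statically.
        logger.info(Boolean.toString(flag), caughtException, 42);

        // Still flagged: argument count does not match the number of place holders.
        logger.info("Hello {}", "world", 42);
    }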
@@ -129,8 +129,12 @@ public class ESLoggerUsageTests extends ESTestCase {
         logger.info("Hello {}, {}", "world", 42, new Exception());
     }
 
-    public void checkFailNonConstantMessage(boolean b) {
-        logger.info(Boolean.toString(b));
+    public void checkNonConstantMessageWithZeroArguments(boolean b) {
+        logger.info(Boolean.toString(b), new Exception());
     }
 
+    public void checkFailNonConstantMessageWithArguments(boolean b) {
+        logger.info(Boolean.toString(b), new Exception(), 42);
+    }
+
     public void checkComplexUsage(boolean b) {