Use try-with-resources with MockLogAppender (#1595)

I previously added a helper that started a MockLogAppender to ensure it
was never added to a Logger before it was started. I subsequently found
the opposite case in RolloverIT.java where the appender was stopped
before it was closed, therefore creating a race where a concurrently
running test in the same JVM could cause a logging failure. This seems
like a really easy mistake to make when writing a test or introduce when
refactoring a test. I've made a change to use try-with-resources to
ensure that proper setup and teardown is done. This should make it much
harder to introduce this particular test bug in the future.
Unfortunately, it did involve touching a lot of files. The changes here
are purely structural to leverage try-with-resources; no testing logic
has been changed.

Signed-off-by: Andrew Ross <andrross@amazon.com>
This commit is contained in:
Andrew Ross 2021-12-21 15:00:55 -06:00 committed by GitHub
parent ca40ba9c64
commit 96d55966a2
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
26 changed files with 839 additions and 1021 deletions

View File

@@ -36,7 +36,6 @@ import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.opensearch.OpenSearchNetty4IntegTestCase;
import org.opensearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest;
import org.opensearch.common.logging.Loggers;
import org.opensearch.test.OpenSearchIntegTestCase;
import org.opensearch.test.InternalTestCluster;
import org.opensearch.test.MockLogAppender;
@@ -53,17 +52,15 @@ public class OpenSearchLoggingHandlerIT extends OpenSearchNetty4IntegTestCase {
public void setUp() throws Exception {
super.setUp();
appender = MockLogAppender.createStarted();
Loggers.addAppender(LogManager.getLogger(OpenSearchLoggingHandler.class), appender);
Loggers.addAppender(LogManager.getLogger(TransportLogger.class), appender);
Loggers.addAppender(LogManager.getLogger(TcpTransport.class), appender);
appender = MockLogAppender.createForLoggers(
LogManager.getLogger(OpenSearchLoggingHandler.class),
LogManager.getLogger(TransportLogger.class),
LogManager.getLogger(TcpTransport.class)
);
}
public void tearDown() throws Exception {
Loggers.removeAppender(LogManager.getLogger(OpenSearchLoggingHandler.class), appender);
Loggers.removeAppender(LogManager.getLogger(TransportLogger.class), appender);
Loggers.removeAppender(LogManager.getLogger(TcpTransport.class), appender);
appender.stop();
appender.close();
super.tearDown();
}

View File

@@ -37,7 +37,6 @@ import org.apache.logging.log4j.LogManager;
import org.opensearch.NioIntegTestCase;
import org.opensearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest;
import org.opensearch.common.logging.Loggers;
import org.opensearch.test.OpenSearchIntegTestCase;
import org.opensearch.test.InternalTestCluster;
import org.opensearch.test.MockLogAppender;
@@ -54,15 +53,11 @@ public class NioTransportLoggingIT extends NioIntegTestCase {
public void setUp() throws Exception {
super.setUp();
appender = MockLogAppender.createStarted();
Loggers.addAppender(LogManager.getLogger(TransportLogger.class), appender);
Loggers.addAppender(LogManager.getLogger(TcpTransport.class), appender);
appender = MockLogAppender.createForLoggers(LogManager.getLogger(TransportLogger.class), LogManager.getLogger(TcpTransport.class));
}
public void tearDown() throws Exception {
Loggers.removeAppender(LogManager.getLogger(TransportLogger.class), appender);
Loggers.removeAppender(LogManager.getLogger(TcpTransport.class), appender);
appender.stop();
appender.close();
super.tearDown();
}

View File

@@ -44,7 +44,6 @@ import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.metadata.AutoExpandReplicas;
import org.opensearch.cluster.metadata.IndexMetadata;
import org.opensearch.cluster.routing.allocation.AllocationService;
import org.opensearch.common.logging.Loggers;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.time.DateFormatter;
import org.opensearch.common.unit.ByteSizeUnit;
@@ -263,23 +262,21 @@ public class RolloverIT extends OpenSearchIntegTestCase {
ensureGreen();
Logger allocationServiceLogger = LogManager.getLogger(AllocationService.class);
MockLogAppender appender = new MockLogAppender();
appender.start();
appender.addExpectation(
new MockLogAppender.UnseenEventExpectation(
"no related message logged on dry run",
AllocationService.class.getName(),
Level.INFO,
"*test_index*"
)
);
Loggers.addAppender(allocationServiceLogger, appender);
final RolloverResponse response;
try (MockLogAppender appender = MockLogAppender.createForLoggers(allocationServiceLogger)) {
appender.addExpectation(
new MockLogAppender.UnseenEventExpectation(
"no related message logged on dry run",
AllocationService.class.getName(),
Level.INFO,
"*test_index*"
)
);
final RolloverResponse response = client().admin().indices().prepareRolloverIndex("test_alias").dryRun(true).get();
response = client().admin().indices().prepareRolloverIndex("test_alias").dryRun(true).get();
appender.assertAllExpectationsMatched();
appender.stop();
Loggers.removeAppender(allocationServiceLogger, appender);
appender.assertAllExpectationsMatched();
}
assertThat(response.getOldIndex(), equalTo("test_index-1"));
assertThat(response.getNewIndex(), equalTo("test_index-000002"));

View File

@@ -59,7 +59,6 @@ import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider
import org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
import org.opensearch.common.Priority;
import org.opensearch.common.io.FileSystemUtils;
import org.opensearch.common.logging.Loggers;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.core.internal.io.IOUtils;
@@ -458,74 +457,68 @@ public class ClusterRerouteIT extends OpenSearchIntegTestCase {
Logger actionLogger = LogManager.getLogger(TransportClusterRerouteAction.class);
MockLogAppender dryRunMockLog = new MockLogAppender();
dryRunMockLog.start();
dryRunMockLog.addExpectation(
new MockLogAppender.UnseenEventExpectation(
"no completed message logged on dry run",
TransportClusterRerouteAction.class.getName(),
Level.INFO,
"allocated an empty primary*"
)
);
Loggers.addAppender(actionLogger, dryRunMockLog);
try (MockLogAppender dryRunMockLog = MockLogAppender.createForLoggers(actionLogger)) {
dryRunMockLog.addExpectation(
new MockLogAppender.UnseenEventExpectation(
"no completed message logged on dry run",
TransportClusterRerouteAction.class.getName(),
Level.INFO,
"allocated an empty primary*"
)
);
AllocationCommand dryRunAllocation = new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeName1, true);
ClusterRerouteResponse dryRunResponse = client().admin()
.cluster()
.prepareReroute()
.setExplain(randomBoolean())
.setDryRun(true)
.add(dryRunAllocation)
.execute()
.actionGet();
AllocationCommand dryRunAllocation = new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeName1, true);
ClusterRerouteResponse dryRunResponse = client().admin()
.cluster()
.prepareReroute()
.setExplain(randomBoolean())
.setDryRun(true)
.add(dryRunAllocation)
.execute()
.actionGet();
// during a dry run, messages exist but are not logged or exposed
assertThat(dryRunResponse.getExplanations().getYesDecisionMessages(), hasSize(1));
assertThat(dryRunResponse.getExplanations().getYesDecisionMessages().get(0), containsString("allocated an empty primary"));
// during a dry run, messages exist but are not logged or exposed
assertThat(dryRunResponse.getExplanations().getYesDecisionMessages(), hasSize(1));
assertThat(dryRunResponse.getExplanations().getYesDecisionMessages().get(0), containsString("allocated an empty primary"));
dryRunMockLog.assertAllExpectationsMatched();
dryRunMockLog.stop();
Loggers.removeAppender(actionLogger, dryRunMockLog);
dryRunMockLog.assertAllExpectationsMatched();
}
MockLogAppender allocateMockLog = new MockLogAppender();
allocateMockLog.start();
allocateMockLog.addExpectation(
new MockLogAppender.SeenEventExpectation(
"message for first allocate empty primary",
TransportClusterRerouteAction.class.getName(),
Level.INFO,
"allocated an empty primary*" + nodeName1 + "*"
)
);
allocateMockLog.addExpectation(
new MockLogAppender.UnseenEventExpectation(
"no message for second allocate empty primary",
TransportClusterRerouteAction.class.getName(),
Level.INFO,
"allocated an empty primary*" + nodeName2 + "*"
)
);
Loggers.addAppender(actionLogger, allocateMockLog);
try (MockLogAppender allocateMockLog = MockLogAppender.createForLoggers(actionLogger)) {
allocateMockLog.addExpectation(
new MockLogAppender.SeenEventExpectation(
"message for first allocate empty primary",
TransportClusterRerouteAction.class.getName(),
Level.INFO,
"allocated an empty primary*" + nodeName1 + "*"
)
);
allocateMockLog.addExpectation(
new MockLogAppender.UnseenEventExpectation(
"no message for second allocate empty primary",
TransportClusterRerouteAction.class.getName(),
Level.INFO,
"allocated an empty primary*" + nodeName2 + "*"
)
);
AllocationCommand yesDecisionAllocation = new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeName1, true);
AllocationCommand noDecisionAllocation = new AllocateEmptyPrimaryAllocationCommand("noexist", 1, nodeName2, true);
ClusterRerouteResponse response = client().admin()
.cluster()
.prepareReroute()
.setExplain(true) // so we get a NO decision back rather than an exception
.add(yesDecisionAllocation)
.add(noDecisionAllocation)
.execute()
.actionGet();
AllocationCommand yesDecisionAllocation = new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeName1, true);
AllocationCommand noDecisionAllocation = new AllocateEmptyPrimaryAllocationCommand("noexist", 1, nodeName2, true);
ClusterRerouteResponse response = client().admin()
.cluster()
.prepareReroute()
.setExplain(true) // so we get a NO decision back rather than an exception
.add(yesDecisionAllocation)
.add(noDecisionAllocation)
.execute()
.actionGet();
assertThat(response.getExplanations().getYesDecisionMessages(), hasSize(1));
assertThat(response.getExplanations().getYesDecisionMessages().get(0), containsString("allocated an empty primary"));
assertThat(response.getExplanations().getYesDecisionMessages().get(0), containsString(nodeName1));
assertThat(response.getExplanations().getYesDecisionMessages(), hasSize(1));
assertThat(response.getExplanations().getYesDecisionMessages().get(0), containsString("allocated an empty primary"));
assertThat(response.getExplanations().getYesDecisionMessages().get(0), containsString(nodeName1));
allocateMockLog.assertAllExpectationsMatched();
allocateMockLog.stop();
Loggers.removeAppender(actionLogger, allocateMockLog);
allocateMockLog.assertAllExpectationsMatched();
}
}
public void testClusterRerouteWithBlocks() {

View File

@@ -39,7 +39,6 @@ import org.apache.logging.log4j.core.LogEvent;
import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.coordination.JoinHelper;
import org.opensearch.cluster.service.ClusterService;
import org.opensearch.common.logging.Loggers;
import org.opensearch.common.settings.Settings;
import org.opensearch.node.Node.DiscoverySettings;
import org.opensearch.test.OpenSearchIntegTestCase;
@@ -119,73 +118,66 @@ public class SingleNodeDiscoveryIT extends OpenSearchIntegTestCase {
}
public void testCannotJoinNodeWithSingleNodeDiscovery() throws Exception {
MockLogAppender mockAppender = new MockLogAppender();
mockAppender.start();
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation("test", JoinHelper.class.getCanonicalName(), Level.INFO, "failed to join") {
Logger clusterLogger = LogManager.getLogger(JoinHelper.class);
try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(clusterLogger)) {
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation("test", JoinHelper.class.getCanonicalName(), Level.INFO, "failed to join") {
@Override
public boolean innerMatch(final LogEvent event) {
return event.getThrown() != null
&& event.getThrown().getClass() == RemoteTransportException.class
&& event.getThrown().getCause() != null
&& event.getThrown().getCause().getClass() == IllegalStateException.class
&& event.getThrown()
.getCause()
.getMessage()
.contains("cannot join node with [discovery.type] set to [single-node]");
}
}
);
final TransportService service = internalCluster().getInstance(TransportService.class);
final int port = service.boundAddress().publishAddress().getPort();
final NodeConfigurationSource configurationSource = new NodeConfigurationSource() {
@Override
public Settings nodeSettings(int nodeOrdinal) {
return Settings.builder()
.put("discovery.type", "zen")
.put("transport.type", getTestTransportType())
.put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s")
/*
* We align the port ranges of the two as then with zen discovery these two
* nodes would find each other.
*/
.put("transport.port", port + "-" + (port + 5 - 1))
.build();
}
@Override
public boolean innerMatch(final LogEvent event) {
return event.getThrown() != null
&& event.getThrown().getClass() == RemoteTransportException.class
&& event.getThrown().getCause() != null
&& event.getThrown().getCause().getClass() == IllegalStateException.class
&& event.getThrown()
.getCause()
.getMessage()
.contains("cannot join node with [discovery.type] set to [single-node]");
public Path nodeConfigPath(int nodeOrdinal) {
return null;
}
}
);
final TransportService service = internalCluster().getInstance(TransportService.class);
final int port = service.boundAddress().publishAddress().getPort();
final NodeConfigurationSource configurationSource = new NodeConfigurationSource() {
@Override
public Settings nodeSettings(int nodeOrdinal) {
return Settings.builder()
.put("discovery.type", "zen")
.put("transport.type", getTestTransportType())
.put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s")
/*
* We align the port ranges of the two as then with zen discovery these two
* nodes would find each other.
*/
.put("transport.port", port + "-" + (port + 5 - 1))
.build();
}
@Override
public Path nodeConfigPath(int nodeOrdinal) {
return null;
}
};
try (
InternalTestCluster other = new InternalTestCluster(
randomLong(),
createTempDir(),
false,
false,
1,
1,
internalCluster().getClusterName(),
configurationSource,
0,
"other",
Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class),
Function.identity()
)
) {
Logger clusterLogger = LogManager.getLogger(JoinHelper.class);
Loggers.addAppender(clusterLogger, mockAppender);
try {
};
try (
InternalTestCluster other = new InternalTestCluster(
randomLong(),
createTempDir(),
false,
false,
1,
1,
internalCluster().getClusterName(),
configurationSource,
0,
"other",
Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class),
Function.identity()
)
) {
other.beforeTest(random(), 0);
final ClusterState first = internalCluster().getInstance(ClusterService.class).state();
assertThat(first.nodes().getSize(), equalTo(1));
assertBusy(() -> mockAppender.assertAllExpectationsMatched());
} finally {
Loggers.removeAppender(clusterLogger, mockAppender);
mockAppender.stop();
}
}
}

View File

@@ -40,7 +40,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.util.Constants;
import org.opensearch.cluster.metadata.Metadata;
import org.opensearch.common.io.PathUtils;
import org.opensearch.common.logging.Loggers;
import org.opensearch.common.settings.Settings;
import org.opensearch.test.AbstractBootstrapCheckTestCase;
import org.opensearch.test.MockLogAppender;
@@ -157,48 +156,42 @@ public class MaxMapCountCheckTests extends AbstractBootstrapCheckTestCase {
final IOException ioException = new IOException("fatal");
when(reader.readLine()).thenThrow(ioException);
final Logger logger = LogManager.getLogger("testGetMaxMapCountIOException");
final MockLogAppender appender = new MockLogAppender();
appender.start();
appender.addExpectation(
new ParameterizedMessageLoggingExpectation(
"expected logged I/O exception",
"testGetMaxMapCountIOException",
Level.WARN,
"I/O exception while trying to read [{}]",
new Object[] { procSysVmMaxMapCountPath },
e -> ioException == e
)
);
Loggers.addAppender(logger, appender);
assertThat(check.getMaxMapCount(logger), equalTo(-1L));
appender.assertAllExpectationsMatched();
verify(reader).close();
Loggers.removeAppender(logger, appender);
appender.stop();
try (MockLogAppender appender = MockLogAppender.createForLoggers(logger)) {
appender.addExpectation(
new ParameterizedMessageLoggingExpectation(
"expected logged I/O exception",
"testGetMaxMapCountIOException",
Level.WARN,
"I/O exception while trying to read [{}]",
new Object[] { procSysVmMaxMapCountPath },
e -> ioException == e
)
);
assertThat(check.getMaxMapCount(logger), equalTo(-1L));
appender.assertAllExpectationsMatched();
verify(reader).close();
}
}
{
reset(reader);
when(reader.readLine()).thenReturn("eof");
final Logger logger = LogManager.getLogger("testGetMaxMapCountNumberFormatException");
final MockLogAppender appender = new MockLogAppender();
appender.start();
appender.addExpectation(
new ParameterizedMessageLoggingExpectation(
"expected logged number format exception",
"testGetMaxMapCountNumberFormatException",
Level.WARN,
"unable to parse vm.max_map_count [{}]",
new Object[] { "eof" },
e -> e instanceof NumberFormatException && e.getMessage().equals("For input string: \"eof\"")
)
);
Loggers.addAppender(logger, appender);
assertThat(check.getMaxMapCount(logger), equalTo(-1L));
appender.assertAllExpectationsMatched();
verify(reader).close();
Loggers.removeAppender(logger, appender);
appender.stop();
try (MockLogAppender appender = MockLogAppender.createForLoggers(logger)) {
appender.addExpectation(
new ParameterizedMessageLoggingExpectation(
"expected logged number format exception",
"testGetMaxMapCountNumberFormatException",
Level.WARN,
"unable to parse vm.max_map_count [{}]",
new Object[] { "eof" },
e -> e instanceof NumberFormatException && e.getMessage().equals("For input string: \"eof\"")
)
);
assertThat(check.getMaxMapCount(logger), equalTo(-1L));
appender.assertAllExpectationsMatched();
verify(reader).close();
}
}
}

View File

@@ -34,6 +34,7 @@ package org.opensearch.cluster;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.opensearch.OpenSearchTimeoutException;
import org.opensearch.Version;
import org.opensearch.action.ActionListener;
@@ -46,7 +47,6 @@ import org.opensearch.common.CheckedRunnable;
import org.opensearch.common.UUIDs;
import org.opensearch.common.component.Lifecycle;
import org.opensearch.common.component.LifecycleListener;
import org.opensearch.common.logging.Loggers;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.transport.BoundTransportAddress;
import org.opensearch.common.transport.TransportAddress;
@@ -352,10 +352,8 @@ public class NodeConnectionsServiceTests extends OpenSearchTestCase {
for (DiscoveryNode disconnectedNode : disconnectedNodes) {
transportService.disconnectFromNode(disconnectedNode);
}
MockLogAppender appender = new MockLogAppender();
try {
appender.start();
Loggers.addAppender(LogManager.getLogger("org.opensearch.cluster.NodeConnectionsService"), appender);
final Logger logger = LogManager.getLogger("org.opensearch.cluster.NodeConnectionsService");
try (MockLogAppender appender = MockLogAppender.createForLoggers(logger)) {
for (DiscoveryNode targetNode : targetNodes) {
if (disconnectedNodes.contains(targetNode)) {
appender.addExpectation(
@@ -396,9 +394,6 @@ public class NodeConnectionsServiceTests extends OpenSearchTestCase {
runTasksUntil(deterministicTaskQueue, CLUSTER_NODE_RECONNECT_INTERVAL_SETTING.get(Settings.EMPTY).millis());
appender.assertAllExpectationsMatched();
} finally {
Loggers.removeAppender(LogManager.getLogger("org.opensearch.cluster.NodeConnectionsService"), appender);
appender.stop();
}
for (DiscoveryNode disconnectedNode : disconnectedNodes) {
transportService.disconnectFromNode(disconnectedNode);
@@ -409,10 +404,8 @@ public class NodeConnectionsServiceTests extends OpenSearchTestCase {
for (DiscoveryNode disconnectedNode : disconnectedNodes) {
transportService.disconnectFromNode(disconnectedNode);
}
appender = new MockLogAppender();
try {
appender.start();
Loggers.addAppender(LogManager.getLogger("org.opensearch.cluster.NodeConnectionsService"), appender);
try (MockLogAppender appender = MockLogAppender.createForLoggers(logger)) {
for (DiscoveryNode targetNode : targetNodes) {
if (disconnectedNodes.contains(targetNode) && newTargetNodes.get(targetNode.getId()) != null) {
appender.addExpectation(
@@ -493,9 +486,6 @@ public class NodeConnectionsServiceTests extends OpenSearchTestCase {
service.connectToNodes(newTargetNodes, () -> {});
deterministicTaskQueue.runAllRunnableTasks();
appender.assertAllExpectationsMatched();
} finally {
Loggers.removeAppender(LogManager.getLogger("org.opensearch.cluster.NodeConnectionsService"), appender);
appender.stop();
}
}

View File

@@ -33,7 +33,6 @@ package org.opensearch.cluster.coordination;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.LogEvent;
import org.opensearch.OpenSearchException;
import org.opensearch.Version;
@@ -48,7 +47,6 @@ import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.cluster.node.DiscoveryNodeRole;
import org.opensearch.cluster.node.DiscoveryNodes;
import org.opensearch.common.io.stream.StreamOutput;
import org.opensearch.common.logging.Loggers;
import org.opensearch.common.regex.Regex;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.settings.Settings.Builder;
@@ -1265,19 +1263,12 @@ public class CoordinatorTests extends AbstractCoordinatorTestCase {
cluster1.clusterNodes.add(newNode);
MockLogAppender mockAppender = new MockLogAppender();
mockAppender.start();
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation("test1", JoinHelper.class.getCanonicalName(), Level.INFO, "*failed to join*")
);
Logger joinLogger = LogManager.getLogger(JoinHelper.class);
Loggers.addAppender(joinLogger, mockAppender);
cluster1.runFor(DEFAULT_STABILISATION_TIME, "failing join validation");
try {
try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(JoinHelper.class))) {
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation("test1", JoinHelper.class.getCanonicalName(), Level.INFO, "*failed to join*")
);
cluster1.runFor(DEFAULT_STABILISATION_TIME, "failing join validation");
mockAppender.assertAllExpectationsMatched();
} finally {
Loggers.removeAppender(joinLogger, mockAppender);
mockAppender.stop();
}
assertEquals(0, newNode.getLastAppliedClusterState().version());
@@ -1554,10 +1545,11 @@ public class CoordinatorTests extends AbstractCoordinatorTestCase {
}
for (int i = scaledRandomIntBetween(1, 10); i >= 0; i--) {
final MockLogAppender mockLogAppender = new MockLogAppender();
try {
mockLogAppender.start();
Loggers.addAppender(LogManager.getLogger(ClusterFormationFailureHelper.class), mockLogAppender);
try (
MockLogAppender mockLogAppender = MockLogAppender.createForLoggers(
LogManager.getLogger(ClusterFormationFailureHelper.class)
)
) {
mockLogAppender.addExpectation(new MockLogAppender.LoggingExpectation() {
final Set<DiscoveryNode> nodesLogged = new HashSet<>();
@@ -1599,9 +1591,6 @@ public class CoordinatorTests extends AbstractCoordinatorTestCase {
});
cluster.runFor(warningDelayMillis + DEFAULT_DELAY_VARIABILITY, "waiting for warning to be emitted");
mockLogAppender.assertAllExpectationsMatched();
} finally {
Loggers.removeAppender(LogManager.getLogger(ClusterFormationFailureHelper.class), mockLogAppender);
mockLogAppender.stop();
}
}
}
@@ -1613,11 +1602,12 @@ public class CoordinatorTests extends AbstractCoordinatorTestCase {
cluster.stabilise();
final ClusterNode brokenNode = cluster.getAnyNodeExcept(cluster.getAnyLeader());
final MockLogAppender mockLogAppender = new MockLogAppender();
try {
mockLogAppender.start();
Loggers.addAppender(LogManager.getLogger(Coordinator.CoordinatorPublication.class), mockLogAppender);
Loggers.addAppender(LogManager.getLogger(LagDetector.class), mockLogAppender);
try (
MockLogAppender mockLogAppender = MockLogAppender.createForLoggers(
LogManager.getLogger(Coordinator.CoordinatorPublication.class),
LogManager.getLogger(LagDetector.class)
)
) {
mockLogAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
@@ -1683,10 +1673,6 @@ public class CoordinatorTests extends AbstractCoordinatorTestCase {
);
mockLogAppender.assertAllExpectationsMatched();
} finally {
Loggers.removeAppender(LogManager.getLogger(Coordinator.CoordinatorPublication.class), mockLogAppender);
Loggers.removeAppender(LogManager.getLogger(LagDetector.class), mockLogAppender);
mockLogAppender.stop();
}
}
}

View File

@@ -33,7 +33,6 @@ package org.opensearch.cluster.routing.allocation;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.opensearch.Version;
import org.opensearch.action.ActionListener;
import org.opensearch.cluster.ClusterInfo;
@@ -51,7 +50,6 @@ import org.opensearch.cluster.routing.RoutingTable;
import org.opensearch.cluster.routing.ShardRoutingState;
import org.opensearch.common.Priority;
import org.opensearch.common.collect.ImmutableOpenMap;
import org.opensearch.common.logging.Loggers;
import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Settings;
import org.opensearch.index.shard.ShardId;
@@ -659,25 +657,30 @@ public class DiskThresholdMonitorTests extends OpenSearchAllocationTestCase {
private void assertNoLogging(DiskThresholdMonitor monitor, ImmutableOpenMap<String, DiskUsage> diskUsages)
throws IllegalAccessException {
MockLogAppender mockAppender = new MockLogAppender();
mockAppender.start();
mockAppender.addExpectation(
new MockLogAppender.UnseenEventExpectation("any INFO message", DiskThresholdMonitor.class.getCanonicalName(), Level.INFO, "*")
);
mockAppender.addExpectation(
new MockLogAppender.UnseenEventExpectation("any WARN message", DiskThresholdMonitor.class.getCanonicalName(), Level.WARN, "*")
);
try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(DiskThresholdMonitor.class))) {
mockAppender.addExpectation(
new MockLogAppender.UnseenEventExpectation(
"any INFO message",
DiskThresholdMonitor.class.getCanonicalName(),
Level.INFO,
"*"
)
);
mockAppender.addExpectation(
new MockLogAppender.UnseenEventExpectation(
"any WARN message",
DiskThresholdMonitor.class.getCanonicalName(),
Level.WARN,
"*"
)
);
Logger diskThresholdMonitorLogger = LogManager.getLogger(DiskThresholdMonitor.class);
Loggers.addAppender(diskThresholdMonitorLogger, mockAppender);
for (int i = between(1, 3); i >= 0; i--) {
monitor.onNewInfo(clusterInfo(diskUsages));
}
for (int i = between(1, 3); i >= 0; i--) {
monitor.onNewInfo(clusterInfo(diskUsages));
mockAppender.assertAllExpectationsMatched();
}
mockAppender.assertAllExpectationsMatched();
Loggers.removeAppender(diskThresholdMonitorLogger, mockAppender);
mockAppender.stop();
}
private void assertRepeatedWarningMessages(DiskThresholdMonitor monitor, ImmutableOpenMap<String, DiskUsage> diskUsages, String message)
@@ -701,28 +704,24 @@ public class DiskThresholdMonitorTests extends OpenSearchAllocationTestCase {
private void assertLogging(DiskThresholdMonitor monitor, ImmutableOpenMap<String, DiskUsage> diskUsages, Level level, String message)
throws IllegalAccessException {
MockLogAppender mockAppender = new MockLogAppender();
mockAppender.start();
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation("expected message", DiskThresholdMonitor.class.getCanonicalName(), level, message)
);
mockAppender.addExpectation(
new MockLogAppender.UnseenEventExpectation(
"any message of another level",
DiskThresholdMonitor.class.getCanonicalName(),
level == Level.INFO ? Level.WARN : Level.INFO,
"*"
)
);
try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(DiskThresholdMonitor.class))) {
mockAppender.start();
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation("expected message", DiskThresholdMonitor.class.getCanonicalName(), level, message)
);
mockAppender.addExpectation(
new MockLogAppender.UnseenEventExpectation(
"any message of another level",
DiskThresholdMonitor.class.getCanonicalName(),
level == Level.INFO ? Level.WARN : Level.INFO,
"*"
)
);
Logger diskThresholdMonitorLogger = LogManager.getLogger(DiskThresholdMonitor.class);
Loggers.addAppender(diskThresholdMonitorLogger, mockAppender);
monitor.onNewInfo(clusterInfo(diskUsages));
monitor.onNewInfo(clusterInfo(diskUsages));
mockAppender.assertAllExpectationsMatched();
Loggers.removeAppender(diskThresholdMonitorLogger, mockAppender);
mockAppender.stop();
mockAppender.assertAllExpectationsMatched();
}
}
private static ClusterInfo clusterInfo(ImmutableOpenMap<String, DiskUsage> diskUsages) {

View File

@@ -34,7 +34,6 @@ package org.opensearch.cluster.service;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.opensearch.Version;
import org.opensearch.cluster.ClusterName;
import org.opensearch.cluster.ClusterState;
@@ -47,7 +46,6 @@ import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.cluster.node.DiscoveryNodes;
import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.opensearch.cluster.service.ClusterApplier.ClusterApplyListener;
import org.opensearch.common.logging.Loggers;
import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
@@ -131,36 +129,32 @@ public class ClusterApplierServiceTests extends OpenSearchTestCase {
@TestLogging(value = "org.opensearch.cluster.service:TRACE", reason = "to ensure that we log cluster state events on TRACE level")
public void testClusterStateUpdateLogging() throws Exception {
MockLogAppender mockAppender = new MockLogAppender();
mockAppender.start();
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test1",
ClusterApplierService.class.getCanonicalName(),
Level.DEBUG,
"*processing [test1]: took [1s] no change in cluster state"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test2",
ClusterApplierService.class.getCanonicalName(),
Level.TRACE,
"*failed to execute cluster state applier in [2s]*"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test3",
ClusterApplierService.class.getCanonicalName(),
Level.DEBUG,
"*processing [test3]: took [0s] no change in cluster state*"
)
);
try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(ClusterApplierService.class))) {
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test1",
ClusterApplierService.class.getCanonicalName(),
Level.DEBUG,
"*processing [test1]: took [1s] no change in cluster state"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test2",
ClusterApplierService.class.getCanonicalName(),
Level.TRACE,
"*failed to execute cluster state applier in [2s]*"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test3",
ClusterApplierService.class.getCanonicalName(),
Level.DEBUG,
"*processing [test3]: took [0s] no change in cluster state*"
)
);
Logger clusterLogger = LogManager.getLogger(ClusterApplierService.class);
Loggers.addAppender(clusterLogger, mockAppender);
try {
clusterApplierService.currentTimeOverride = threadPool.relativeTimeInMillis();
clusterApplierService.runOnApplierThread(
"test1",
@ -198,46 +192,39 @@ public class ClusterApplierServiceTests extends OpenSearchTestCase {
}
});
assertBusy(mockAppender::assertAllExpectationsMatched);
} finally {
Loggers.removeAppender(clusterLogger, mockAppender);
mockAppender.stop();
}
}
@TestLogging(value = "org.opensearch.cluster.service:WARN", reason = "to ensure that we log cluster state events on WARN level")
public void testLongClusterStateUpdateLogging() throws Exception {
MockLogAppender mockAppender = new MockLogAppender();
mockAppender.start();
mockAppender.addExpectation(
new MockLogAppender.UnseenEventExpectation(
"test1 shouldn't see because setting is too low",
ClusterApplierService.class.getCanonicalName(),
Level.WARN,
"*cluster state applier task [test1] took [*] which is above the warn threshold of *"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test2",
ClusterApplierService.class.getCanonicalName(),
Level.WARN,
"*cluster state applier task [test2] took [32s] which is above the warn threshold of [*]: "
+ "[running task [test2]] took [*"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test4",
ClusterApplierService.class.getCanonicalName(),
Level.WARN,
"*cluster state applier task [test3] took [34s] which is above the warn threshold of [*]: "
+ "[running task [test3]] took [*"
)
);
try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(ClusterApplierService.class))) {
mockAppender.addExpectation(
new MockLogAppender.UnseenEventExpectation(
"test1 shouldn't see because setting is too low",
ClusterApplierService.class.getCanonicalName(),
Level.WARN,
"*cluster state applier task [test1] took [*] which is above the warn threshold of *"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test2",
ClusterApplierService.class.getCanonicalName(),
Level.WARN,
"*cluster state applier task [test2] took [32s] which is above the warn threshold of [*]: "
+ "[running task [test2]] took [*"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test4",
ClusterApplierService.class.getCanonicalName(),
Level.WARN,
"*cluster state applier task [test3] took [34s] which is above the warn threshold of [*]: "
+ "[running task [test3]] took [*"
)
);
Logger clusterLogger = LogManager.getLogger(ClusterApplierService.class);
Loggers.addAppender(clusterLogger, mockAppender);
try {
final CountDownLatch latch = new CountDownLatch(4);
final CountDownLatch processedFirstTask = new CountDownLatch(1);
clusterApplierService.currentTimeOverride = threadPool.relativeTimeInMillis();
@ -301,11 +288,8 @@ public class ClusterApplierServiceTests extends OpenSearchTestCase {
}
});
latch.await();
} finally {
Loggers.removeAppender(clusterLogger, mockAppender);
mockAppender.stop();
mockAppender.assertAllExpectationsMatched();
}
mockAppender.assertAllExpectationsMatched();
}
public void testLocalNodeMasterListenerCallbacks() {

View File

@ -34,7 +34,6 @@ package org.opensearch.cluster.service;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.opensearch.OpenSearchException;
import org.opensearch.Version;
import org.opensearch.cluster.AckedClusterStateUpdateTask;
@ -54,7 +53,6 @@ import org.opensearch.cluster.node.DiscoveryNodes;
import org.opensearch.common.Nullable;
import org.opensearch.common.Priority;
import org.opensearch.common.collect.Tuple;
import org.opensearch.common.logging.Loggers;
import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
@ -327,168 +325,163 @@ public class MasterServiceTests extends OpenSearchTestCase {
@TestLogging(value = "org.opensearch.cluster.service:TRACE", reason = "to ensure that we log cluster state events on TRACE level")
public void testClusterStateUpdateLogging() throws Exception {
MockLogAppender mockAppender = new MockLogAppender();
mockAppender.start();
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test1 start",
MasterService.class.getCanonicalName(),
Level.DEBUG,
"executing cluster state update for [test1]"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test1 computation",
MasterService.class.getCanonicalName(),
Level.DEBUG,
"took [1s] to compute cluster state update for [test1]"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test1 notification",
MasterService.class.getCanonicalName(),
Level.DEBUG,
"took [0s] to notify listeners on unchanged cluster state for [test1]"
)
);
try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(MasterService.class))) {
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test1 start",
MasterService.class.getCanonicalName(),
Level.DEBUG,
"executing cluster state update for [test1]"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test1 computation",
MasterService.class.getCanonicalName(),
Level.DEBUG,
"took [1s] to compute cluster state update for [test1]"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test1 notification",
MasterService.class.getCanonicalName(),
Level.DEBUG,
"took [0s] to notify listeners on unchanged cluster state for [test1]"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test2 start",
MasterService.class.getCanonicalName(),
Level.DEBUG,
"executing cluster state update for [test2]"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test2 failure",
MasterService.class.getCanonicalName(),
Level.TRACE,
"failed to execute cluster state update (on version: [*], uuid: [*]) for [test2]*"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test2 computation",
MasterService.class.getCanonicalName(),
Level.DEBUG,
"took [2s] to compute cluster state update for [test2]"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test2 notification",
MasterService.class.getCanonicalName(),
Level.DEBUG,
"took [0s] to notify listeners on unchanged cluster state for [test2]"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test2 start",
MasterService.class.getCanonicalName(),
Level.DEBUG,
"executing cluster state update for [test2]"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test2 failure",
MasterService.class.getCanonicalName(),
Level.TRACE,
"failed to execute cluster state update (on version: [*], uuid: [*]) for [test2]*"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test2 computation",
MasterService.class.getCanonicalName(),
Level.DEBUG,
"took [2s] to compute cluster state update for [test2]"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test2 notification",
MasterService.class.getCanonicalName(),
Level.DEBUG,
"took [0s] to notify listeners on unchanged cluster state for [test2]"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test3 start",
MasterService.class.getCanonicalName(),
Level.DEBUG,
"executing cluster state update for [test3]"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test3 computation",
MasterService.class.getCanonicalName(),
Level.DEBUG,
"took [3s] to compute cluster state update for [test3]"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test3 notification",
MasterService.class.getCanonicalName(),
Level.DEBUG,
"took [4s] to notify listeners on successful publication of cluster state (version: *, uuid: *) for [test3]"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test3 start",
MasterService.class.getCanonicalName(),
Level.DEBUG,
"executing cluster state update for [test3]"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test3 computation",
MasterService.class.getCanonicalName(),
Level.DEBUG,
"took [3s] to compute cluster state update for [test3]"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test3 notification",
MasterService.class.getCanonicalName(),
Level.DEBUG,
"took [4s] to notify listeners on successful publication of cluster state (version: *, uuid: *) for [test3]"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test4",
MasterService.class.getCanonicalName(),
Level.DEBUG,
"executing cluster state update for [test4]"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test4",
MasterService.class.getCanonicalName(),
Level.DEBUG,
"executing cluster state update for [test4]"
)
);
Logger clusterLogger = LogManager.getLogger(MasterService.class);
Loggers.addAppender(clusterLogger, mockAppender);
try (MasterService masterService = createMasterService(true)) {
masterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
relativeTimeInMillis += TimeValue.timeValueSeconds(1).millis();
return currentState;
}
try (MasterService masterService = createMasterService(true)) {
masterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
relativeTimeInMillis += TimeValue.timeValueSeconds(1).millis();
return currentState;
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {}
@Override
public void onFailure(String source, Exception e) {
fail();
}
});
masterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
relativeTimeInMillis += TimeValue.timeValueSeconds(2).millis();
throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task");
}
@Override
public void onFailure(String source, Exception e) {
fail();
}
});
masterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
relativeTimeInMillis += TimeValue.timeValueSeconds(2).millis();
throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task");
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
fail();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
fail();
}
@Override
public void onFailure(String source, Exception e) {}
});
masterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
relativeTimeInMillis += TimeValue.timeValueSeconds(3).millis();
return ClusterState.builder(currentState).incrementVersion().build();
}
@Override
public void onFailure(String source, Exception e) {}
});
masterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
relativeTimeInMillis += TimeValue.timeValueSeconds(3).millis();
return ClusterState.builder(currentState).incrementVersion().build();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
relativeTimeInMillis += TimeValue.timeValueSeconds(4).millis();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
relativeTimeInMillis += TimeValue.timeValueSeconds(4).millis();
}
@Override
public void onFailure(String source, Exception e) {
fail();
}
});
masterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return currentState;
}
@Override
public void onFailure(String source, Exception e) {
fail();
}
});
masterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return currentState;
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {}
@Override
public void onFailure(String source, Exception e) {
fail();
}
});
assertBusy(mockAppender::assertAllExpectationsMatched);
} finally {
Loggers.removeAppender(clusterLogger, mockAppender);
mockAppender.stop();
@Override
public void onFailure(String source, Exception e) {
fail();
}
});
assertBusy(mockAppender::assertAllExpectationsMatched);
}
}
}
@ -741,233 +734,228 @@ public class MasterServiceTests extends OpenSearchTestCase {
@TestLogging(value = "org.opensearch.cluster.service:WARN", reason = "to ensure that we log cluster state events on WARN level")
public void testLongClusterStateUpdateLogging() throws Exception {
MockLogAppender mockAppender = new MockLogAppender();
mockAppender.start();
mockAppender.addExpectation(
new MockLogAppender.UnseenEventExpectation(
"test1 shouldn't log because it was fast enough",
MasterService.class.getCanonicalName(),
Level.WARN,
"*took*test1*"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test2",
MasterService.class.getCanonicalName(),
Level.WARN,
"*took [*], which is over [10s], to compute cluster state update for [test2]"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test3",
MasterService.class.getCanonicalName(),
Level.WARN,
"*took [*], which is over [10s], to compute cluster state update for [test3]"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test4",
MasterService.class.getCanonicalName(),
Level.WARN,
"*took [*], which is over [10s], to compute cluster state update for [test4]"
)
);
mockAppender.addExpectation(
new MockLogAppender.UnseenEventExpectation(
"test5 should not log despite publishing slowly",
MasterService.class.getCanonicalName(),
Level.WARN,
"*took*test5*"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test6 should log due to slow and failing publication",
MasterService.class.getCanonicalName(),
Level.WARN,
"took [*] and then failed to publish updated cluster state (version: *, uuid: *) for [test6]:*"
)
);
Logger clusterLogger = LogManager.getLogger(MasterService.class);
Loggers.addAppender(clusterLogger, mockAppender);
try (
MasterService masterService = new MasterService(
Settings.builder()
.put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName())
.put(Node.NODE_NAME_SETTING.getKey(), "test_node")
.build(),
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
threadPool
)
) {
final DiscoveryNode localNode = new DiscoveryNode(
"node1",
buildNewFakeTransportAddress(),
emptyMap(),
emptySet(),
Version.CURRENT
try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(MasterService.class))) {
mockAppender.addExpectation(
new MockLogAppender.UnseenEventExpectation(
"test1 shouldn't log because it was fast enough",
MasterService.class.getCanonicalName(),
Level.WARN,
"*took*test1*"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test2",
MasterService.class.getCanonicalName(),
Level.WARN,
"*took [*], which is over [10s], to compute cluster state update for [test2]"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test3",
MasterService.class.getCanonicalName(),
Level.WARN,
"*took [*], which is over [10s], to compute cluster state update for [test3]"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test4",
MasterService.class.getCanonicalName(),
Level.WARN,
"*took [*], which is over [10s], to compute cluster state update for [test4]"
)
);
mockAppender.addExpectation(
new MockLogAppender.UnseenEventExpectation(
"test5 should not log despite publishing slowly",
MasterService.class.getCanonicalName(),
Level.WARN,
"*took*test5*"
)
);
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test6 should log due to slow and failing publication",
MasterService.class.getCanonicalName(),
Level.WARN,
"took [*] and then failed to publish updated cluster state (version: *, uuid: *) for [test6]:*"
)
);
final ClusterState initialClusterState = ClusterState.builder(new ClusterName(MasterServiceTests.class.getSimpleName()))
.nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId()))
.blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK)
.build();
final AtomicReference<ClusterState> clusterStateRef = new AtomicReference<>(initialClusterState);
masterService.setClusterStatePublisher((event, publishListener, ackListener) -> {
if (event.source().contains("test5")) {
relativeTimeInMillis += MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY).millis()
+ randomLongBetween(1, 1000000);
}
if (event.source().contains("test6")) {
relativeTimeInMillis += MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY).millis()
+ randomLongBetween(1, 1000000);
throw new OpenSearchException("simulated error during slow publication which should trigger logging");
}
clusterStateRef.set(event.state());
publishListener.onResponse(null);
});
masterService.setClusterStateSupplier(clusterStateRef::get);
masterService.start();
final CountDownLatch latch = new CountDownLatch(6);
final CountDownLatch processedFirstTask = new CountDownLatch(1);
masterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
relativeTimeInMillis += randomLongBetween(
0L,
MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY).millis()
);
return currentState;
}
try (
MasterService masterService = new MasterService(
Settings.builder()
.put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName())
.put(Node.NODE_NAME_SETTING.getKey(), "test_node")
.build(),
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
threadPool
)
) {
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
processedFirstTask.countDown();
}
final DiscoveryNode localNode = new DiscoveryNode(
"node1",
buildNewFakeTransportAddress(),
emptyMap(),
emptySet(),
Version.CURRENT
);
final ClusterState initialClusterState = ClusterState.builder(new ClusterName(MasterServiceTests.class.getSimpleName()))
.nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId()))
.blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK)
.build();
final AtomicReference<ClusterState> clusterStateRef = new AtomicReference<>(initialClusterState);
masterService.setClusterStatePublisher((event, publishListener, ackListener) -> {
if (event.source().contains("test5")) {
relativeTimeInMillis += MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY)
.millis() + randomLongBetween(1, 1000000);
}
if (event.source().contains("test6")) {
relativeTimeInMillis += MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY)
.millis() + randomLongBetween(1, 1000000);
throw new OpenSearchException("simulated error during slow publication which should trigger logging");
}
clusterStateRef.set(event.state());
publishListener.onResponse(null);
});
masterService.setClusterStateSupplier(clusterStateRef::get);
masterService.start();
@Override
public void onFailure(String source, Exception e) {
fail();
}
});
final CountDownLatch latch = new CountDownLatch(6);
final CountDownLatch processedFirstTask = new CountDownLatch(1);
masterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
relativeTimeInMillis += randomLongBetween(
0L,
MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY).millis()
);
return currentState;
}
processedFirstTask.await();
masterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
relativeTimeInMillis += MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY).millis()
+ randomLongBetween(1, 1000000);
throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task");
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
processedFirstTask.countDown();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
fail();
}
@Override
public void onFailure(String source, Exception e) {
fail();
}
});
@Override
public void onFailure(String source, Exception e) {
latch.countDown();
}
});
masterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
relativeTimeInMillis += MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY).millis()
+ randomLongBetween(1, 1000000);
return ClusterState.builder(currentState).incrementVersion().build();
}
processedFirstTask.await();
masterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
relativeTimeInMillis += MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY)
.millis() + randomLongBetween(1, 1000000);
throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task");
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
fail();
}
@Override
public void onFailure(String source, Exception e) {
fail();
}
});
masterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
relativeTimeInMillis += MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY).millis()
+ randomLongBetween(1, 1000000);
return currentState;
}
@Override
public void onFailure(String source, Exception e) {
latch.countDown();
}
});
masterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
relativeTimeInMillis += MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY)
.millis() + randomLongBetween(1, 1000000);
return ClusterState.builder(currentState).incrementVersion().build();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
}
@Override
public void onFailure(String source, Exception e) {
fail();
}
});
masterService.submitStateUpdateTask("test5", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return ClusterState.builder(currentState).incrementVersion().build();
}
@Override
public void onFailure(String source, Exception e) {
fail();
}
});
masterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
relativeTimeInMillis += MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY)
.millis() + randomLongBetween(1, 1000000);
return currentState;
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
}
@Override
public void onFailure(String source, Exception e) {
fail();
}
});
masterService.submitStateUpdateTask("test6", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return ClusterState.builder(currentState).incrementVersion().build();
}
@Override
public void onFailure(String source, Exception e) {
fail();
}
});
masterService.submitStateUpdateTask("test5", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return ClusterState.builder(currentState).incrementVersion().build();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
fail();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
}
@Override
public void onFailure(String source, Exception e) {
fail(); // maybe we should notify here?
}
});
// Additional update task to make sure all previous logging made it to the loggerName
// We don't check logging for this on since there is no guarantee that it will occur before our check
masterService.submitStateUpdateTask("test7", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return currentState;
}
@Override
public void onFailure(String source, Exception e) {
fail();
}
});
masterService.submitStateUpdateTask("test6", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return ClusterState.builder(currentState).incrementVersion().build();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
fail();
}
@Override
public void onFailure(String source, Exception e) {
fail();
}
});
latch.await();
} finally {
Loggers.removeAppender(clusterLogger, mockAppender);
mockAppender.stop();
@Override
public void onFailure(String source, Exception e) {
fail(); // maybe we should notify here?
}
});
// Additional update task to make sure all previous logging made it to the loggerName
// We don't check logging for this on since there is no guarantee that it will occur before our check
masterService.submitStateUpdateTask("test7", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return currentState;
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
}
@Override
public void onFailure(String source, Exception e) {
fail();
}
});
latch.await();
}
mockAppender.assertAllExpectationsMatched();
}
mockAppender.assertAllExpectationsMatched();
}
public void testAcking() throws InterruptedException {

View File

@ -37,7 +37,6 @@ import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.LogEvent;
import org.opensearch.cluster.metadata.IndexMetadata;
import org.opensearch.common.collect.Tuple;
import org.opensearch.common.logging.Loggers;
import org.opensearch.common.settings.AbstractScopedSettings.SettingUpdater;
import org.opensearch.common.settings.Setting.Property;
import org.opensearch.common.unit.ByteSizeUnit;
@ -1479,32 +1478,26 @@ public class SettingTests extends OpenSearchTestCase {
);
final IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY);
final MockLogAppender mockLogAppender = new MockLogAppender();
mockLogAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"message",
"org.opensearch.common.settings.IndexScopedSettings",
Level.INFO,
"updating [index.refresh_interval] from [20s] to [10s]"
) {
@Override
public boolean innerMatch(LogEvent event) {
return event.getMarker().getName().equals(" [index1]");
}
}
);
mockLogAppender.start();
final Logger logger = LogManager.getLogger(IndexScopedSettings.class);
try {
Loggers.addAppender(logger, mockLogAppender);
try (MockLogAppender mockLogAppender = MockLogAppender.createForLoggers(logger)) {
mockLogAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"message",
"org.opensearch.common.settings.IndexScopedSettings",
Level.INFO,
"updating [index.refresh_interval] from [20s] to [10s]"
) {
@Override
public boolean innerMatch(LogEvent event) {
return event.getMarker().getName().equals(" [index1]");
}
}
);
settings.updateIndexMetadata(
newIndexMeta("index1", Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "10s").build())
);
mockLogAppender.assertAllExpectationsMatched();
} finally {
Loggers.removeAppender(logger, mockLogAppender);
mockLogAppender.stop();
}
}
}

View File

@ -35,7 +35,6 @@ import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.opensearch.common.Strings;
import org.opensearch.common.logging.Loggers;
import org.opensearch.common.settings.Setting.Property;
import org.opensearch.common.xcontent.XContentBuilder;
import org.opensearch.common.xcontent.json.JsonXContent;
@ -143,14 +142,10 @@ public class SettingsFilterTests extends OpenSearchTestCase {
private void assertExpectedLogMessages(Consumer<Logger> consumer, MockLogAppender.LoggingExpectation... expectations)
throws IllegalAccessException {
Logger testLogger = LogManager.getLogger("org.opensearch.test");
MockLogAppender appender = MockLogAppender.createStarted();
Loggers.addAppender(testLogger, appender);
try {
try (MockLogAppender appender = MockLogAppender.createForLoggers(testLogger)) {
Arrays.stream(expectations).forEach(appender::addExpectation);
consumer.accept(testLogger);
appender.assertAllExpectationsMatched();
} finally {
Loggers.removeAppender(testLogger, appender);
}
}

View File

@ -42,7 +42,6 @@ import org.opensearch.action.ActionListener;
import org.opensearch.cluster.ClusterName;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.common.Nullable;
import org.opensearch.common.logging.Loggers;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.transport.TransportAddress;
import org.opensearch.test.OpenSearchTestCase;
@ -174,26 +173,20 @@ public class HandshakingTransportAddressConnectorTests extends OpenSearchTestCas
FailureListener failureListener = new FailureListener();
MockLogAppender mockAppender = new MockLogAppender();
mockAppender.start();
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"message",
HandshakingTransportAddressConnector.class.getCanonicalName(),
Level.WARN,
"*completed handshake with [*] but followup connection failed*"
)
);
Logger targetLogger = LogManager.getLogger(HandshakingTransportAddressConnector.class);
Loggers.addAppender(targetLogger, mockAppender);
try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(targetLogger)) {
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"message",
HandshakingTransportAddressConnector.class.getCanonicalName(),
Level.WARN,
"*completed handshake with [*] but followup connection failed*"
)
);
try {
handshakingTransportAddressConnector.connectToRemoteMasterNode(discoveryAddress, failureListener);
failureListener.assertFailure();
mockAppender.assertAllExpectationsMatched();
} finally {
Loggers.removeAppender(targetLogger, mockAppender);
mockAppender.stop();
}
}

View File

@ -51,7 +51,6 @@ import org.opensearch.cluster.routing.RoutingTable;
import org.opensearch.cluster.routing.allocation.AllocationService;
import org.opensearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
import org.opensearch.common.collect.Tuple;
import org.opensearch.common.logging.Loggers;
import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.xcontent.NamedXContentRegistry;
@ -568,18 +567,11 @@ public class IncrementalClusterStateWriterTests extends OpenSearchAllocationTest
IncrementalClusterStateWriter incrementalClusterStateWriter,
MockLogAppender.LoggingExpectation expectation
) throws IllegalAccessException, WriteStateException {
MockLogAppender mockAppender = new MockLogAppender();
mockAppender.start();
mockAppender.addExpectation(expectation);
Logger classLogger = LogManager.getLogger(IncrementalClusterStateWriter.class);
Loggers.addAppender(classLogger, mockAppender);
try {
try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(classLogger)) {
mockAppender.addExpectation(expectation);
incrementalClusterStateWriter.updateClusterState(clusterState);
} finally {
Loggers.removeAppender(classLogger, mockAppender);
mockAppender.stop();
mockAppender.assertAllExpectationsMatched();
}
mockAppender.assertAllExpectationsMatched();
}
}

View File

@ -51,7 +51,6 @@ import org.opensearch.cluster.metadata.Metadata;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.cluster.node.DiscoveryNodes;
import org.opensearch.common.UUIDs;
import org.opensearch.common.logging.Loggers;
import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.BigArrays;
@ -1178,23 +1177,16 @@ public class PersistedClusterStateServiceTests extends OpenSearchTestCase {
PersistedClusterStateService.Writer writer,
MockLogAppender.LoggingExpectation expectation
) throws IllegalAccessException, IOException {
MockLogAppender mockAppender = new MockLogAppender();
mockAppender.start();
mockAppender.addExpectation(expectation);
Logger classLogger = LogManager.getLogger(PersistedClusterStateService.class);
Loggers.addAppender(classLogger, mockAppender);
try {
try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(classLogger)) {
mockAppender.addExpectation(expectation);
if (previousState == null) {
writer.writeFullStateAndCommit(currentTerm, clusterState);
} else {
writer.writeIncrementalStateAndCommit(currentTerm, previousState, clusterState);
}
} finally {
Loggers.removeAppender(classLogger, mockAppender);
mockAppender.stop();
mockAppender.assertAllExpectationsMatched();
}
mockAppender.assertAllExpectationsMatched();
}
@Override

View File

@ -37,7 +37,6 @@ import org.apache.logging.log4j.LogManager;
import org.opensearch.common.UUIDs;
import org.opensearch.common.bytes.BytesArray;
import org.opensearch.common.bytes.BytesReference;
import org.opensearch.common.logging.Loggers;
import org.opensearch.common.network.NetworkService;
import org.opensearch.common.network.NetworkUtils;
import org.opensearch.common.settings.ClusterSettings;
@ -268,12 +267,8 @@ public class AbstractHttpServerTransportTests extends OpenSearchTestCase {
.put(HttpTransportSettings.SETTING_HTTP_TRACE_LOG_EXCLUDE.getKey(), excludeSettings)
.build()
);
MockLogAppender appender = new MockLogAppender();
final String traceLoggerName = "org.opensearch.http.HttpTracer";
try {
appender.start();
Loggers.addAppender(LogManager.getLogger(traceLoggerName), appender);
try (MockLogAppender appender = MockLogAppender.createForLoggers(LogManager.getLogger(traceLoggerName))) {
final String opaqueId = UUIDs.randomBase64UUID(random());
appender.addExpectation(
new MockLogAppender.PatternSeenEventExpectation(
@ -342,9 +337,6 @@ public class AbstractHttpServerTransportTests extends OpenSearchTestCase {
transport.incomingRequest(fakeRestRequestExcludedPath.getHttpRequest(), fakeRestRequestExcludedPath.getHttpChannel());
appender.assertAllExpectationsMatched();
} finally {
Loggers.removeAppender(LogManager.getLogger(traceLoggerName), appender);
appender.stop();
}
}
}

View File

@ -34,7 +34,6 @@ package org.opensearch.ingest;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.SetOnce;
import org.opensearch.OpenSearchParseException;
import org.opensearch.ResourceNotFoundException;
@ -59,7 +58,6 @@ import org.opensearch.cluster.metadata.Metadata;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.cluster.service.ClusterService;
import org.opensearch.common.bytes.BytesArray;
import org.opensearch.common.logging.Loggers;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.xcontent.XContentBuilder;
@ -544,24 +542,17 @@ public class IngestServiceTests extends OpenSearchTestCase {
);
ClusterState previousClusterState = clusterState;
clusterState = IngestService.innerPut(putRequest, clusterState);
MockLogAppender mockAppender = new MockLogAppender();
mockAppender.start();
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test1",
IngestService.class.getCanonicalName(),
Level.WARN,
"failed to update ingest pipelines"
)
);
Logger ingestLogger = LogManager.getLogger(IngestService.class);
Loggers.addAppender(ingestLogger, mockAppender);
try {
try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(IngestService.class))) {
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test1",
IngestService.class.getCanonicalName(),
Level.WARN,
"failed to update ingest pipelines"
)
);
ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState));
mockAppender.assertAllExpectationsMatched();
} finally {
Loggers.removeAppender(ingestLogger, mockAppender);
mockAppender.stop();
}
pipeline = ingestService.getPipeline(id);
assertNotNull(pipeline);

View File

@ -34,13 +34,11 @@ package org.opensearch.monitor.fs;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.mockfile.FilterFileChannel;
import org.apache.lucene.mockfile.FilterFileSystemProvider;
import org.opensearch.cluster.coordination.DeterministicTaskQueue;
import org.opensearch.common.io.PathUtils;
import org.opensearch.common.io.PathUtilsForTesting;
import org.opensearch.common.logging.Loggers;
import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Settings;
import org.opensearch.env.NodeEnvironment;
@ -158,12 +156,10 @@ public class FsHealthServiceTests extends OpenSearchTestCase {
PathUtilsForTesting.installMock(fileSystem);
final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
MockLogAppender mockAppender = new MockLogAppender();
mockAppender.start();
Logger logger = LogManager.getLogger(FsHealthService.class);
Loggers.addAppender(logger, mockAppender);
try (NodeEnvironment env = newNodeEnvironment()) {
try (
MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(FsHealthService.class));
NodeEnvironment env = newNodeEnvironment()
) {
FsHealthService fsHealthService = new FsHealthService(settings, clusterSettings, testThreadPool, env);
int counter = 0;
for (Path path : env.nodeDataPaths()) {
@ -183,8 +179,6 @@ public class FsHealthServiceTests extends OpenSearchTestCase {
assertEquals(env.nodeDataPaths().length, disruptFileSystemProvider.getInjectedPathCount());
assertBusy(mockAppender::assertAllExpectationsMatched);
} finally {
Loggers.removeAppender(logger, mockAppender);
mockAppender.stop();
PathUtilsForTesting.teardown();
ThreadPool.terminate(testThreadPool, 500, TimeUnit.MILLISECONDS);
}

View File

@ -32,9 +32,7 @@
package org.opensearch.plugins;
import org.opensearch.common.logging.Loggers;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.LuceneTestCase;
@ -737,45 +735,43 @@ public class PluginsServiceTests extends OpenSearchTestCase {
public void testFindPluginDirs() throws Exception {
final Path plugins = createTempDir();
final MockLogAppender mockLogAppender = new MockLogAppender();
mockLogAppender.start();
mockLogAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"[.test] warning",
"org.opensearch.plugins.PluginsService",
Level.WARN,
"Non-plugin file located in the plugins folder with the following name: [.DS_Store]"
)
);
final Logger testLogger = LogManager.getLogger(PluginsService.class);
Loggers.addAppender(testLogger, mockLogAppender);
try (MockLogAppender mockLogAppender = MockLogAppender.createForLoggers(LogManager.getLogger(PluginsService.class))) {
mockLogAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"[.test] warning",
"org.opensearch.plugins.PluginsService",
Level.WARN,
"Non-plugin file located in the plugins folder with the following name: [.DS_Store]"
)
);
final Path fake = plugins.resolve("fake");
Path testFile = plugins.resolve(".DS_Store");
Files.createFile(testFile);
final Path fake = plugins.resolve("fake");
Path testFile = plugins.resolve(".DS_Store");
Files.createFile(testFile);
PluginTestUtil.writePluginProperties(
fake,
"description",
"description",
"name",
"fake",
"version",
"1.0.0",
"opensearch.version",
Version.CURRENT.toString(),
"java.version",
System.getProperty("java.specification.version"),
"classname",
"test.DummyPlugin"
);
PluginTestUtil.writePluginProperties(
fake,
"description",
"description",
"name",
"fake",
"version",
"1.0.0",
"opensearch.version",
Version.CURRENT.toString(),
"java.version",
System.getProperty("java.specification.version"),
"classname",
"test.DummyPlugin"
);
try (InputStream jar = PluginsServiceTests.class.getResourceAsStream("dummy-plugin.jar")) {
Files.copy(jar, fake.resolve("plugin.jar"));
try (InputStream jar = PluginsServiceTests.class.getResourceAsStream("dummy-plugin.jar")) {
Files.copy(jar, fake.resolve("plugin.jar"));
}
assertThat(PluginsService.findPluginDirs(plugins), containsInAnyOrder(fake));
mockLogAppender.assertAllExpectationsMatched();
}
assertThat(PluginsService.findPluginDirs(plugins), containsInAnyOrder(fake));
mockLogAppender.assertAllExpectationsMatched();
}
public void testExistingMandatoryClasspathPlugin() {

View File

@ -34,7 +34,6 @@ package org.opensearch.transport;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.opensearch.OpenSearchException;
import org.opensearch.Version;
import org.opensearch.action.ActionListener;
@ -46,7 +45,6 @@ import org.opensearch.common.io.stream.BytesStreamOutput;
import org.opensearch.common.io.stream.InputStreamStreamInput;
import org.opensearch.common.io.stream.NamedWriteableRegistry;
import org.opensearch.common.io.stream.StreamInput;
import org.opensearch.common.logging.Loggers;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.BigArrays;
@ -260,20 +258,16 @@ public class InboundHandlerTests extends OpenSearchTestCase {
// response so we must just close the connection on an error. To avoid the failure disappearing into a black hole we at least log
// it.
final MockLogAppender mockAppender = new MockLogAppender();
mockAppender.start();
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"expected message",
InboundHandler.class.getCanonicalName(),
Level.WARN,
"could not send error response to handshake"
)
);
final Logger inboundHandlerLogger = LogManager.getLogger(InboundHandler.class);
Loggers.addAppender(inboundHandlerLogger, mockAppender);
try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(InboundHandler.class))) {
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"expected message",
InboundHandler.class.getCanonicalName(),
Level.WARN,
"could not send error response to handshake"
)
);
try {
final AtomicBoolean isClosed = new AtomicBoolean();
channel.addCloseListener(ActionListener.wrap(() -> assertTrue(isClosed.compareAndSet(false, true))));
@ -293,28 +287,21 @@ public class InboundHandlerTests extends OpenSearchTestCase {
assertTrue(isClosed.get());
assertNull(channel.getMessageCaptor().get());
mockAppender.assertAllExpectationsMatched();
} finally {
Loggers.removeAppender(inboundHandlerLogger, mockAppender);
mockAppender.stop();
}
}
public void testLogsSlowInboundProcessing() throws Exception {
final MockLogAppender mockAppender = new MockLogAppender();
mockAppender.start();
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"expected message",
InboundHandler.class.getCanonicalName(),
Level.WARN,
"handling inbound transport message "
)
);
final Logger inboundHandlerLogger = LogManager.getLogger(InboundHandler.class);
Loggers.addAppender(inboundHandlerLogger, mockAppender);
try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(InboundHandler.class))) {
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"expected message",
InboundHandler.class.getCanonicalName(),
Level.WARN,
"handling inbound transport message "
)
);
handler.setSlowLogThreshold(TimeValue.timeValueMillis(5L));
try {
handler.setSlowLogThreshold(TimeValue.timeValueMillis(5L));
final Version remoteVersion = Version.CURRENT;
final long requestId = randomNonNegativeLong();
final Header requestHeader = new Header(
@ -336,9 +323,6 @@ public class InboundHandlerTests extends OpenSearchTestCase {
handler.inboundMessage(channel, requestMessage);
assertNotNull(channel.getMessageCaptor().get());
mockAppender.assertAllExpectationsMatched();
} finally {
Loggers.removeAppender(inboundHandlerLogger, mockAppender);
mockAppender.stop();
}
}

View File

@ -40,7 +40,6 @@ import org.opensearch.action.support.PlainActionFuture;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.common.component.Lifecycle;
import org.opensearch.common.io.stream.BytesStreamOutput;
import org.opensearch.common.logging.Loggers;
import org.opensearch.common.network.NetworkService;
import org.opensearch.common.network.NetworkUtils;
import org.opensearch.common.settings.Settings;
@ -524,12 +523,7 @@ public class TcpTransportTests extends OpenSearchTestCase {
MockLogAppender.LoggingExpectation... expectations
) throws IllegalAccessException {
final TestThreadPool testThreadPool = new TestThreadPool("test");
MockLogAppender appender = new MockLogAppender();
try {
appender.start();
Loggers.addAppender(LogManager.getLogger(TcpTransport.class), appender);
try (MockLogAppender appender = MockLogAppender.createForLoggers(LogManager.getLogger(TcpTransport.class))) {
for (MockLogAppender.LoggingExpectation expectation : expectations) {
appender.addExpectation(expectation);
}
@ -568,8 +562,6 @@ public class TcpTransportTests extends OpenSearchTestCase {
appender.assertAllExpectationsMatched();
} finally {
Loggers.removeAppender(LogManager.getLogger(TcpTransport.class), appender);
appender.stop();
ThreadPool.terminate(testThreadPool, 30, TimeUnit.SECONDS);
}
}

View File

@ -38,7 +38,6 @@ import org.opensearch.action.admin.cluster.stats.ClusterStatsAction;
import org.opensearch.action.admin.cluster.stats.ClusterStatsRequest;
import org.opensearch.common.bytes.BytesReference;
import org.opensearch.common.io.stream.BytesStreamOutput;
import org.opensearch.common.logging.Loggers;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.concurrent.ThreadContext;
import org.opensearch.test.OpenSearchTestCase;
@ -51,57 +50,44 @@ import static org.mockito.Mockito.mock;
@TestLogging(value = "org.opensearch.transport.TransportLogger:trace", reason = "to ensure we log network events on TRACE level")
public class TransportLoggerTests extends OpenSearchTestCase {
public void testLoggingHandler() throws Exception {
try (MockLogAppender appender = MockLogAppender.createForLoggers(LogManager.getLogger(TransportLogger.class))) {
final String writePattern = ".*\\[length: \\d+"
+ ", request id: \\d+"
+ ", type: request"
+ ", version: .*"
+ ", header size: \\d+B"
+ ", action: cluster:monitor/stats]"
+ " WRITE: \\d+B";
final MockLogAppender.LoggingExpectation writeExpectation = new MockLogAppender.PatternSeenEventExpectation(
"hot threads request",
TransportLogger.class.getCanonicalName(),
Level.TRACE,
writePattern
);
private MockLogAppender appender;
final String readPattern = ".*\\[length: \\d+"
+ ", request id: \\d+"
+ ", type: request"
+ ", version: .*"
+ ", header size: \\d+B"
+ ", action: cluster:monitor/stats]"
+ " READ: \\d+B";
public void setUp() throws Exception {
super.setUp();
appender = MockLogAppender.createStarted();
Loggers.addAppender(LogManager.getLogger(TransportLogger.class), appender);
}
final MockLogAppender.LoggingExpectation readExpectation = new MockLogAppender.PatternSeenEventExpectation(
"cluster monitor request",
TransportLogger.class.getCanonicalName(),
Level.TRACE,
readPattern
);
public void tearDown() throws Exception {
Loggers.removeAppender(LogManager.getLogger(TransportLogger.class), appender);
appender.stop();
super.tearDown();
}
public void testLoggingHandler() throws IOException {
final String writePattern = ".*\\[length: \\d+"
+ ", request id: \\d+"
+ ", type: request"
+ ", version: .*"
+ ", header size: \\d+B"
+ ", action: cluster:monitor/stats]"
+ " WRITE: \\d+B";
final MockLogAppender.LoggingExpectation writeExpectation = new MockLogAppender.PatternSeenEventExpectation(
"hot threads request",
TransportLogger.class.getCanonicalName(),
Level.TRACE,
writePattern
);
final String readPattern = ".*\\[length: \\d+"
+ ", request id: \\d+"
+ ", type: request"
+ ", version: .*"
+ ", header size: \\d+B"
+ ", action: cluster:monitor/stats]"
+ " READ: \\d+B";
final MockLogAppender.LoggingExpectation readExpectation = new MockLogAppender.PatternSeenEventExpectation(
"cluster monitor request",
TransportLogger.class.getCanonicalName(),
Level.TRACE,
readPattern
);
appender.addExpectation(writeExpectation);
appender.addExpectation(readExpectation);
BytesReference bytesReference = buildRequest();
TransportLogger.logInboundMessage(mock(TcpChannel.class), bytesReference.slice(6, bytesReference.length() - 6));
TransportLogger.logOutboundMessage(mock(TcpChannel.class), bytesReference);
appender.assertAllExpectationsMatched();
appender.addExpectation(writeExpectation);
appender.addExpectation(readExpectation);
BytesReference bytesReference = buildRequest();
TransportLogger.logInboundMessage(mock(TcpChannel.class), bytesReference.slice(6, bytesReference.length() - 6));
TransportLogger.logOutboundMessage(mock(TcpChannel.class), bytesReference);
appender.assertAllExpectationsMatched();
}
}
private BytesReference buildRequest() throws IOException {

View File

@ -32,11 +32,15 @@
package org.opensearch.test;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.appender.AbstractAppender;
import org.apache.logging.log4j.core.filter.RegexFilter;
import org.opensearch.common.logging.Loggers;
import org.opensearch.common.regex.Regex;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.regex.Pattern;
@ -47,32 +51,43 @@ import static org.hamcrest.MatcherAssert.assertThat;
/**
* Test appender that can be used to verify that certain events were logged correctly
*/
public class MockLogAppender extends AbstractAppender {
public class MockLogAppender extends AbstractAppender implements AutoCloseable {
private static final String COMMON_PREFIX = System.getProperty("opensearch.logger.prefix", "org.opensearch.");
private final List<LoggingExpectation> expectations;
private final List<Logger> loggers;
/**
* Creates and starts a MockLogAppender. Generally preferred over using the constructor
* directly because adding an unstarted appender to the static logging context can cause
* difficult-to-identify errors in the tests and this method makes it impossible to do
* that.
* Creates an instance and adds it as an appender to the given Loggers. Upon
* closure, this instance will then remove itself from the Loggers it was added
* to. It is strongly recommended to use this class in a try-with-resources block
* to guarantee that it is properly removed from all Loggers. Since the logging
* state is static and therefore global within a JVM, it can cause unrelated
* tests to fail if, for example, they trigger a logging statement that tried to
* write to a closed MockLogAppender instance.
*/
public static MockLogAppender createStarted() throws IllegalAccessException {
final MockLogAppender appender = new MockLogAppender();
public static MockLogAppender createForLoggers(Logger... loggers) throws IllegalAccessException {
final MockLogAppender appender = new MockLogAppender(
RegexFilter.createFilter(".*(\n.*)*", new String[0], false, null, null),
Collections.unmodifiableList(Arrays.asList(loggers))
);
appender.start();
for (Logger logger : loggers) {
Loggers.addAppender(logger, appender);
}
return appender;
}
public MockLogAppender() throws IllegalAccessException {
super("mock", RegexFilter.createFilter(".*(\n.*)*", new String[0], false, null, null), null);
private MockLogAppender(RegexFilter filter, List<Logger> loggers) {
super("mock", filter, null);
/*
* We use a copy-on-write array list since log messages could be appended while we are setting up expectations. When that occurs,
* we would run into a concurrent modification exception from the iteration over the expectations in #append, concurrent with a
* modification from #addExpectation.
*/
expectations = new CopyOnWriteArrayList<>();
this.expectations = new CopyOnWriteArrayList<>();
this.loggers = loggers;
}
public void addExpectation(LoggingExpectation expectation) {
@ -92,6 +107,14 @@ public class MockLogAppender extends AbstractAppender {
}
}
@Override
public void close() {
for (Logger logger : loggers) {
Loggers.removeAppender(logger, this);
}
this.stop();
}
public interface LoggingExpectation {
void match(LogEvent event);

View File

@ -34,6 +34,7 @@ package org.opensearch.transport;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.CollectionUtil;
@ -51,7 +52,6 @@ import org.opensearch.common.SuppressForbidden;
import org.opensearch.common.io.stream.BytesStreamOutput;
import org.opensearch.common.io.stream.StreamInput;
import org.opensearch.common.io.stream.StreamOutput;
import org.opensearch.common.logging.Loggers;
import org.opensearch.common.network.CloseableChannel;
import org.opensearch.common.network.NetworkAddress;
import org.opensearch.common.network.NetworkUtils;
@ -1202,10 +1202,8 @@ public abstract class AbstractSimpleTransportTestCase extends OpenSearchTestCase
.build()
);
MockLogAppender appender = new MockLogAppender();
try {
appender.start();
Loggers.addAppender(LogManager.getLogger("org.opensearch.transport.TransportService.tracer"), appender);
final Logger logger = LogManager.getLogger("org.opensearch.transport.TransportService.tracer");
try (MockLogAppender appender = MockLogAppender.createForLoggers(logger)) {
final String requestSent = ".*\\[internal:test].*sent to.*\\{TS_B}.*";
final MockLogAppender.LoggingExpectation requestSentExpectation = new MockLogAppender.PatternSeenEventExpectation(
"sent request",
@ -1291,9 +1289,6 @@ public abstract class AbstractSimpleTransportTestCase extends OpenSearchTestCase
future.txGet();
assertBusy(appender::assertAllExpectationsMatched);
} finally {
Loggers.removeAppender(LogManager.getLogger("org.opensearch.transport.TransportService.tracer"), appender);
appender.stop();
}
}

View File

@ -35,7 +35,6 @@ package org.opensearch.transport.nio;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.opensearch.common.CheckedRunnable;
import org.opensearch.common.logging.Loggers;
import org.opensearch.common.settings.Settings;
import org.opensearch.nio.ServerChannelContext;
import org.opensearch.nio.SocketChannelContext;
@ -54,73 +53,60 @@ import static org.mockito.Mockito.mock;
public class TestEventHandlerTests extends OpenSearchTestCase {
private MockLogAppender appender;
public void setUp() throws Exception {
super.setUp();
appender = new MockLogAppender();
Loggers.addAppender(LogManager.getLogger(MockNioTransport.class), appender);
appender.start();
}
public void tearDown() throws Exception {
Loggers.removeAppender(LogManager.getLogger(MockNioTransport.class), appender);
appender.stop();
super.tearDown();
}
public void testLogOnElapsedTime() throws Exception {
long start = System.nanoTime();
long end = start + TimeUnit.MILLISECONDS.toNanos(400);
AtomicBoolean isStart = new AtomicBoolean(true);
LongSupplier timeSupplier = () -> {
if (isStart.compareAndSet(true, false)) {
return start;
} else if (isStart.compareAndSet(false, true)) {
return end;
}
throw new IllegalStateException("Cannot update isStart");
};
final ThreadPool threadPool = mock(ThreadPool.class);
doAnswer(i -> timeSupplier.getAsLong()).when(threadPool).relativeTimeInNanos();
TestEventHandler eventHandler = new TestEventHandler(
e -> {},
() -> null,
new MockNioTransport.TransportThreadWatchdog(threadPool, Settings.EMPTY)
);
ServerChannelContext serverChannelContext = mock(ServerChannelContext.class);
SocketChannelContext socketChannelContext = mock(SocketChannelContext.class);
RuntimeException exception = new RuntimeException("boom");
Map<String, CheckedRunnable<Exception>> tests = new HashMap<>();
tests.put("acceptChannel", () -> eventHandler.acceptChannel(serverChannelContext));
tests.put("acceptException", () -> eventHandler.acceptException(serverChannelContext, exception));
tests.put("registrationException", () -> eventHandler.registrationException(socketChannelContext, exception));
tests.put("handleConnect", () -> eventHandler.handleConnect(socketChannelContext));
tests.put("connectException", () -> eventHandler.connectException(socketChannelContext, exception));
tests.put("handleRead", () -> eventHandler.handleRead(socketChannelContext));
tests.put("readException", () -> eventHandler.readException(socketChannelContext, exception));
tests.put("handleWrite", () -> eventHandler.handleWrite(socketChannelContext));
tests.put("writeException", () -> eventHandler.writeException(socketChannelContext, exception));
tests.put("handleTask", () -> eventHandler.handleTask(mock(Runnable.class)));
tests.put("taskException", () -> eventHandler.taskException(exception));
tests.put("handleClose", () -> eventHandler.handleClose(socketChannelContext));
tests.put("closeException", () -> eventHandler.closeException(socketChannelContext, exception));
tests.put("genericChannelException", () -> eventHandler.genericChannelException(socketChannelContext, exception));
for (Map.Entry<String, CheckedRunnable<Exception>> entry : tests.entrySet()) {
String message = "*Slow execution on network thread*";
MockLogAppender.LoggingExpectation slowExpectation = new MockLogAppender.SeenEventExpectation(
entry.getKey(),
MockNioTransport.class.getCanonicalName(),
Level.WARN,
message
try (MockLogAppender appender = MockLogAppender.createForLoggers(LogManager.getLogger(MockNioTransport.class))) {
long start = System.nanoTime();
long end = start + TimeUnit.MILLISECONDS.toNanos(400);
AtomicBoolean isStart = new AtomicBoolean(true);
LongSupplier timeSupplier = () -> {
if (isStart.compareAndSet(true, false)) {
return start;
} else if (isStart.compareAndSet(false, true)) {
return end;
}
throw new IllegalStateException("Cannot update isStart");
};
final ThreadPool threadPool = mock(ThreadPool.class);
doAnswer(i -> timeSupplier.getAsLong()).when(threadPool).relativeTimeInNanos();
TestEventHandler eventHandler = new TestEventHandler(
e -> {},
() -> null,
new MockNioTransport.TransportThreadWatchdog(threadPool, Settings.EMPTY)
);
appender.addExpectation(slowExpectation);
entry.getValue().run();
appender.assertAllExpectationsMatched();
ServerChannelContext serverChannelContext = mock(ServerChannelContext.class);
SocketChannelContext socketChannelContext = mock(SocketChannelContext.class);
RuntimeException exception = new RuntimeException("boom");
Map<String, CheckedRunnable<Exception>> tests = new HashMap<>();
tests.put("acceptChannel", () -> eventHandler.acceptChannel(serverChannelContext));
tests.put("acceptException", () -> eventHandler.acceptException(serverChannelContext, exception));
tests.put("registrationException", () -> eventHandler.registrationException(socketChannelContext, exception));
tests.put("handleConnect", () -> eventHandler.handleConnect(socketChannelContext));
tests.put("connectException", () -> eventHandler.connectException(socketChannelContext, exception));
tests.put("handleRead", () -> eventHandler.handleRead(socketChannelContext));
tests.put("readException", () -> eventHandler.readException(socketChannelContext, exception));
tests.put("handleWrite", () -> eventHandler.handleWrite(socketChannelContext));
tests.put("writeException", () -> eventHandler.writeException(socketChannelContext, exception));
tests.put("handleTask", () -> eventHandler.handleTask(mock(Runnable.class)));
tests.put("taskException", () -> eventHandler.taskException(exception));
tests.put("handleClose", () -> eventHandler.handleClose(socketChannelContext));
tests.put("closeException", () -> eventHandler.closeException(socketChannelContext, exception));
tests.put("genericChannelException", () -> eventHandler.genericChannelException(socketChannelContext, exception));
for (Map.Entry<String, CheckedRunnable<Exception>> entry : tests.entrySet()) {
String message = "*Slow execution on network thread*";
MockLogAppender.LoggingExpectation slowExpectation = new MockLogAppender.SeenEventExpectation(
entry.getKey(),
MockNioTransport.class.getCanonicalName(),
Level.WARN,
message
);
appender.addExpectation(slowExpectation);
entry.getValue().run();
appender.assertAllExpectationsMatched();
}
}
}
}