From 03a499821c9676da0896ca864074dfb8fbdefd6e Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Fri, 2 Jun 2023 18:07:34 -0700 Subject: [PATCH] HADOOP-18207. Introduce hadoop-logging module (#5503) Reviewed-by: Duo Zhang --- .../hadoop-auth-examples/pom.xml | 10 -- hadoop-common-project/hadoop-auth/pom.xml | 12 +- .../util/TestRandomSignerSecretProvider.java | 8 +- .../util/TestZKSignerSecretProvider.java | 8 +- hadoop-common-project/hadoop-common/pom.xml | 10 ++ .../src/main/conf/log4j.properties | 2 +- .../java/org/apache/hadoop/log/LogLevel.java | 21 ++- .../org/apache/hadoop/util/StringUtils.java | 4 +- .../apache/hadoop/conf/TestConfiguration.java | 91 +++-------- .../io/compress/CompressDecompressTester.java | 14 +- .../org/apache/hadoop/log/TestLogLevel.java | 19 ++- .../impl/TestMetricsSourceAdapter.java | 11 +- .../TestShellBasedUnixGroupsMapping.java | 6 +- .../ssl/TestReloadingX509KeyManager.java | 4 +- .../ssl/TestReloadingX509TrustManager.java | 2 +- .../hadoop/service/TestServiceOperations.java | 4 +- .../apache/hadoop/test/GenericTestUtils.java | 91 +---------- .../hadoop/test/TestGenericTestUtils.java | 2 + .../org/apache/hadoop/util/TestClassUtil.java | 4 +- .../hadoop/util/TestReflectionUtils.java | 2 +- .../util/bloom/BloomFilterCommonTester.java | 6 +- hadoop-common-project/hadoop-kms/pom.xml | 6 + .../hadoop/crypto/key/kms/server/TestKMS.java | 5 +- .../crypto/key/kms/server/TestKMSAudit.java | 24 +-- .../dev-support/findbugsExcludeFile.xml | 23 +++ hadoop-common-project/hadoop-logging/pom.xml | 125 +++++++++++++++ .../logging/HadoopInternalLog4jUtils.java | 145 ++++++++++++++++++ .../hadoop/logging/HadoopLoggerUtils.java | 142 +++++++++++++++++ .../logging/appenders}/AsyncRFAAppender.java | 2 +- .../Log4jWarningErrorMetricsAppender.java | 49 +++--- .../apache/hadoop/logging/LogCapturer.java | 65 ++++++++ .../hadoop/logging/test/TestSyncLogs.java | 37 +++++ .../src/test/resources/log4j.properties | 18 +++ hadoop-common-project/hadoop-minikdc/pom.xml | 5 - hadoop-common-project/pom.xml | 1 + .../hadoop-hdfs-client/pom.xml | 6 + .../hdfs/web/TestURLConnectionFactory.java | 5 +- hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml | 6 + ...RouterRefreshFairnessPolicyController.java | 5 +- ...TestRouterRpcFairnessPolicyController.java | 4 +- .../router/TestRouterNamenodeMonitoring.java | 15 +- .../federation/router/TestRouterRpc.java | 18 ++- .../router/TestRouterRpcMultiDestination.java | 12 +- .../dev-support/findbugsExcludeFile.xml | 10 -- hadoop-hdfs-project/hadoop-hdfs/pom.xml | 6 + .../hdfs/server/common/MetricsLoggerTask.java | 7 +- .../server/namenode/FsImageValidation.java | 21 ++- .../hadoop/hdfs/LogVerificationAppender.java | 75 --------- .../org/apache/hadoop/hdfs/TestDFSRename.java | 7 +- .../hadoop/hdfs/TestDFSUpgradeFromImage.java | 9 +- .../apache/hadoop/hdfs/TestDataStream.java | 4 +- .../hadoop/hdfs/TestEncryptedTransfer.java | 12 +- .../sasl/TestSaslDataTransfer.java | 4 +- .../server/TestJournaledEditsCache.java | 2 +- .../balancer/TestBalancerWithHANameNodes.java | 2 +- .../TestBlockManagerSafeMode.java | 9 +- .../TestPendingReconstruction.java | 4 +- .../TestReplicationPolicy.java | 45 +++--- .../datanode/TestDataNodeMetricsLogger.java | 37 ++--- .../server/datanode/TestDirectoryScanner.java | 16 +- .../fsdataset/impl/TestFsDatasetCache.java | 12 +- .../server/diskbalancer/TestDiskBalancer.java | 3 +- .../namenode/PatternMatchingAppender.java | 58 ------- .../hdfs/server/namenode/TestAuditLogger.java | 2 +- .../namenode/TestAuditLoggerWithCommands.java | 2 +- 
.../hdfs/server/namenode/TestAuditLogs.java | 15 +- .../hdfs/server/namenode/TestCheckpoint.java | 4 +- .../namenode/TestDiskspaceQuotaUpdate.java | 2 +- .../hdfs/server/namenode/TestEditLog.java | 40 +---- .../namenode/TestEditsDoubleBuffer.java | 6 +- .../server/namenode/TestFSEditLogLoader.java | 2 +- .../server/namenode/TestFSNamesystemLock.java | 2 +- .../namenode/TestFSNamesystemLockReport.java | 6 +- .../hadoop/hdfs/server/namenode/TestFsck.java | 2 +- .../namenode/TestNameNodeMetricsLogger.java | 29 ++-- .../namenode/TestNameNodeResourcePolicy.java | 3 +- .../hdfs/server/namenode/TestStartup.java | 16 +- .../namenode/ha/TestBootstrapStandby.java | 4 +- .../ha/TestDelegationTokensWithHA.java | 3 +- .../namenode/ha/TestStandbyCheckpoints.java | 54 ++++--- .../TestExternalStoragePolicySatisfier.java | 4 +- .../src/test/resources/log4j.properties | 13 +- .../hadoop-mapreduce-client-app/pom.xml | 6 + .../v2/app/job/impl/TestTaskAttempt.java | 62 ++------ .../hadoop-mapreduce-client-core/pom.xml | 6 + .../org/apache/hadoop/mapred/TaskLog.java | 46 +----- .../mapreduce/TestJobMonitorAndPrint.java | 95 ++++++------ .../hadoop-mapreduce-client-jobclient/pom.xml | 6 + .../apache/hadoop/mapred/TestYARNRunner.java | 124 +++++++-------- .../apache/hadoop/mapreduce/TestChild.java | 28 ++-- .../mapreduce/security/TestJHSSecurity.java | 6 +- .../hadoop/mapreduce/v2/TestMRJobs.java | 11 +- hadoop-project/pom.xml | 12 ++ hadoop-tools/hadoop-azure/pom.xml | 7 +- .../ITestFileSystemOperationsWithThreads.java | 2 +- ...estNativeAzureFileSystemClientLogging.java | 2 +- hadoop-tools/hadoop-distcp/pom.xml | 6 + .../contract/AbstractContractDistCpTest.java | 5 +- .../util/MapReduceJobPropertiesParser.java | 5 +- .../dev-support/findbugs-exclude.xml | 10 -- .../distributedshell/ApplicationMaster.java | 6 +- .../applications/distributedshell/Client.java | 3 +- .../distributedshell/Log4jPropertyHelper.java | 48 ------ .../client/TestSecureApiServiceClient.java | 6 +- .../yarn/service/component/TestComponent.java | 3 - .../TestAggregatedLogDeletionService.java | 4 +- .../TestLog4jWarningErrorMetricsAppender.java | 1 + .../webapp/NavBlock.java | 2 +- .../server/webapp/ErrorsAndWarningsBlock.java | 2 +- .../server/nodemanager/webapp/NavBlock.java | 2 +- .../TestContainersMonitorResourceChange.java | 6 +- .../pom.xml | 7 + .../constraint/AllocationTagsManager.java | 7 +- .../resourcemanager/webapp/NavBlock.java | 2 +- .../TestFederationRMStateStoreService.java | 5 +- .../TestSystemMetricsPublisherForV2.java | 42 +---- ...acityPreemptionPolicyPreemptToBalance.java | 7 +- ...CapacityPreemptionPolicyMockFramework.java | 4 +- .../TestMetricsInvariantChecker.java | 6 +- .../fair/TestFairSchedulerConfiguration.java | 68 ++------ 120 files changed, 1165 insertions(+), 1106 deletions(-) create mode 100644 hadoop-common-project/hadoop-logging/dev-support/findbugsExcludeFile.xml create mode 100644 hadoop-common-project/hadoop-logging/pom.xml create mode 100644 hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopInternalLog4jUtils.java create mode 100644 hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopLoggerUtils.java rename {hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util => hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders}/AsyncRFAAppender.java (98%) rename {hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util => 
hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders}/Log4jWarningErrorMetricsAppender.java (93%) create mode 100644 hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/LogCapturer.java create mode 100644 hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/test/TestSyncLogs.java create mode 100644 hadoop-common-project/hadoop-logging/src/test/resources/log4j.properties delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/PatternMatchingAppender.java delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Log4jPropertyHelper.java diff --git a/hadoop-common-project/hadoop-auth-examples/pom.xml b/hadoop-common-project/hadoop-auth-examples/pom.xml index 4deda432797..9a060f75028 100644 --- a/hadoop-common-project/hadoop-auth-examples/pom.xml +++ b/hadoop-common-project/hadoop-auth-examples/pom.xml @@ -46,16 +46,6 @@ slf4j-api compile - - log4j - log4j - runtime - - - org.slf4j - slf4j-log4j12 - runtime - diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml index 433a615c606..4cdd6006a46 100644 --- a/hadoop-common-project/hadoop-auth/pom.xml +++ b/hadoop-common-project/hadoop-auth/pom.xml @@ -82,14 +82,14 @@ compile - log4j - log4j - runtime + org.apache.hadoop + hadoop-logging - org.slf4j - slf4j-log4j12 - runtime + org.apache.hadoop + hadoop-logging + test + test-jar org.apache.hadoop diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java index f9c922caac8..e18982d75ff 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java @@ -15,8 +15,7 @@ package org.apache.hadoop.security.authentication.util; import java.util.Random; -import org.apache.log4j.Level; -import org.apache.log4j.LogManager; +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.junit.Assert; import org.junit.Test; @@ -30,9 +29,8 @@ public class TestRandomSignerSecretProvider { private final int timeout = 500; private final long rolloverFrequency = timeout / 2; - { - LogManager.getLogger( - RolloverSignerSecretProvider.LOG.getName()).setLevel(Level.DEBUG); + static { + HadoopLoggerUtils.setLogLevel(RolloverSignerSecretProvider.LOG.getName(), "DEBUG"); } @Test diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java index 628342e40dc..d81d1eb3359 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java @@ -19,8 +19,7 @@ import java.util.Random; 
import javax.servlet.ServletContext; import org.apache.curator.test.TestingServer; -import org.apache.log4j.Level; -import org.apache.log4j.LogManager; +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -39,9 +38,8 @@ public class TestZKSignerSecretProvider { private final int timeout = 100; private final long rolloverFrequency = timeout / 2; - { - LogManager.getLogger( - RolloverSignerSecretProvider.LOG.getName()).setLevel(Level.DEBUG); + static { + HadoopLoggerUtils.setLogLevel(RolloverSignerSecretProvider.LOG.getName(), "DEBUG"); } @Before diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 6c6d3ec5bf9..58006c011da 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -411,6 +411,16 @@ lz4-java provided + + org.apache.hadoop + hadoop-logging + + + org.apache.hadoop + hadoop-logging + test + test-jar + diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties index b4eec1fe2cc..086665151e8 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties +++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties @@ -299,7 +299,7 @@ log4j.appender.NMAUDIT.MaxBackupIndex=${nm.audit.log.maxbackupindex} yarn.ewma.cleanupInterval=300 yarn.ewma.messageAgeLimitSeconds=86400 yarn.ewma.maxUniqueMessages=250 -log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender +log4j.appender.EWMA=org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval} log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds} log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java index 32879597a9c..cf090eea009 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java @@ -42,6 +42,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.http.HttpServer2; +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.security.authentication.client.AuthenticatedURL; import org.apache.hadoop.security.authentication.client.KerberosAuthenticator; import org.apache.hadoop.security.ssl.SSLFactory; @@ -50,8 +51,6 @@ import org.apache.hadoop.util.GenericsUtil; import org.apache.hadoop.util.ServletUtil; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; /** * Change log level in runtime. @@ -349,7 +348,7 @@ public class LogLevel { } if (GenericsUtil.isLog4jLogger(logName)) { - process(Logger.getLogger(logName), level, out); + process(logName, level, out); } else { out.println("Sorry, setting log level is only supported for log4j loggers.
"); } @@ -368,19 +367,17 @@ public class LogLevel { + "" + ""; - private static void process(Logger log, String level, - PrintWriter out) throws IOException { + private static void process(String log, String level, PrintWriter out) { if (level != null) { - if (!level.equalsIgnoreCase(Level.toLevel(level) - .toString())) { - out.println(MARKER + "Bad Level : " + level + "
"); - } else { - log.setLevel(Level.toLevel(level)); + try { + HadoopLoggerUtils.setLogLevel(log, level); out.println(MARKER + "Setting Level to " + level + " ...
"); + } catch (IllegalArgumentException e) { + out.println(MARKER + "Bad Level : " + level + "
"); } } - out.println(MARKER - + "Effective Level: " + log.getEffectiveLevel() + "
"); + out.println(MARKER + "Effective Level: " + HadoopLoggerUtils.getEffectiveLevel(log) + + "
"); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java index 3debd36da78..3c13feac3ed 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java @@ -40,8 +40,8 @@ import org.apache.commons.lang3.time.FastDateFormat; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.net.NetUtils; -import org.apache.log4j.LogManager; import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses; @@ -761,7 +761,7 @@ public class StringUtils { public void run() { log.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{ "Shutting down " + classname + " at " + hostname})); - LogManager.shutdown(); + HadoopLoggerUtils.shutdownLogManager(); } }, SHUTDOWN_HOOK_PRIORITY); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java index b3487ef309f..913826f3eed 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java @@ -68,6 +68,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration.IntegerRanges; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.alias.CredentialProvider; import org.apache.hadoop.security.alias.CredentialProviderFactory; @@ -76,10 +77,8 @@ import org.apache.hadoop.test.GenericTestUtils; import static org.apache.hadoop.util.PlatformName.IBM_JAVA; -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.Logger; -import org.apache.log4j.spi.LoggingEvent; import org.mockito.Mockito; +import org.slf4j.LoggerFactory; public class TestConfiguration { @@ -220,9 +219,7 @@ public class TestConfiguration { InputStream in2 = new ByteArrayInputStream(bytes2); // Attach our own log appender so we can verify output - TestAppender appender = new TestAppender(); - final Logger logger = Logger.getRootLogger(); - logger.addAppender(appender); + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); try { // Add the 2 different resources - this should generate a warning @@ -230,17 +227,13 @@ public class TestConfiguration { conf.addResource(in2); assertEquals("should see the first value", "A", conf.get("prop")); - List events = appender.getLog(); - assertEquals("overriding a final parameter should cause logging", 1, - events.size()); - LoggingEvent loggingEvent = events.get(0); - String renderedMessage = loggingEvent.getRenderedMessage(); - assertTrue("did not see expected string inside message "+ renderedMessage, - renderedMessage.contains("an attempt to override final parameter: " - + "prop; Ignoring.")); + String renderedMessage = logCapturer.getOutput(); + assertTrue("did not see expected string inside message " + renderedMessage, + renderedMessage.contains( + "an attempt to override final parameter: " + "prop; Ignoring.")); 
} finally { // Make sure the appender is removed - logger.removeAppender(appender); + logCapturer.stopCapturing(); } } @@ -258,9 +251,7 @@ public class TestConfiguration { InputStream in2 = new ByteArrayInputStream(bytes); // Attach our own log appender so we can verify output - TestAppender appender = new TestAppender(); - final Logger logger = Logger.getRootLogger(); - logger.addAppender(appender); + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); try { // Add the resource twice from a stream - should not generate warnings @@ -268,20 +259,15 @@ public class TestConfiguration { conf.addResource(in2); assertEquals("A", conf.get("prop")); - List events = appender.getLog(); - for (LoggingEvent loggingEvent : events) { - System.out.println("Event = " + loggingEvent.getRenderedMessage()); - } + String appenderOutput = logCapturer.getOutput(); assertTrue("adding same resource twice should not cause logging", - events.isEmpty()); + appenderOutput.isEmpty()); } finally { // Make sure the appender is removed - logger.removeAppender(appender); + logCapturer.stopCapturing(); } } - - @Test public void testFinalWarningsMultiple() throws Exception { // Make a configuration file with a repeated final property @@ -295,24 +281,19 @@ public class TestConfiguration { InputStream in1 = new ByteArrayInputStream(bytes); // Attach our own log appender so we can verify output - TestAppender appender = new TestAppender(); - final Logger logger = Logger.getRootLogger(); - logger.addAppender(appender); + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); try { // Add the resource - this should not produce a warning conf.addResource(in1); assertEquals("should see the value", "A", conf.get("prop")); - List events = appender.getLog(); - for (LoggingEvent loggingEvent : events) { - System.out.println("Event = " + loggingEvent.getRenderedMessage()); - } + String appenderOutput = logCapturer.getOutput(); assertTrue("adding same resource twice should not cause logging", - events.isEmpty()); + appenderOutput.isEmpty()); } finally { // Make sure the appender is removed - logger.removeAppender(appender); + logCapturer.stopCapturing(); } } @@ -329,48 +310,20 @@ public class TestConfiguration { InputStream in1 = new ByteArrayInputStream(bytes); // Attach our own log appender so we can verify output - TestAppender appender = new TestAppender(); - final Logger logger = Logger.getRootLogger(); - logger.addAppender(appender); + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); try { // Add the resource - this should produce a warning conf.addResource(in1); assertEquals("should see the value", "A", conf.get("prop")); - List events = appender.getLog(); - assertEquals("overriding a final parameter should cause logging", 1, - events.size()); - LoggingEvent loggingEvent = events.get(0); - String renderedMessage = loggingEvent.getRenderedMessage(); - assertTrue("did not see expected string inside message "+ renderedMessage, - renderedMessage.contains("an attempt to override final parameter: " - + "prop; Ignoring.")); + String renderedMessage = logCapturer.getOutput(); + assertTrue("did not see expected string inside message " + renderedMessage, + renderedMessage.contains( + "an attempt to override final parameter: " + "prop; Ignoring.")); } finally { // Make sure the appender is removed - logger.removeAppender(appender); - } - } - - /** - * A simple appender for white box testing. 
- */ - private static class TestAppender extends AppenderSkeleton { - private final List log = new ArrayList<>(); - - @Override public boolean requiresLayout() { - return false; - } - - @Override protected void append(final LoggingEvent loggingEvent) { - log.add(loggingEvent); - } - - @Override public void close() { - } - - public List getLog() { - return new ArrayList<>(log); + logCapturer.stopCapturing(); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java index c016ff03789..9e4405f6d18 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java @@ -36,8 +36,9 @@ import org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater; import org.apache.hadoop.io.compress.zlib.ZlibCompressor; import org.apache.hadoop.io.compress.zlib.ZlibFactory; import org.apache.hadoop.util.NativeCodeLoader; -import org.apache.log4j.Logger; import org.junit.Assert; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; @@ -47,9 +48,6 @@ import static org.junit.Assert.*; public class CompressDecompressTester { - private static final Logger logger = Logger - .getLogger(CompressDecompressTester.class); - private final byte[] originalRawData; private ImmutableList> pairs = ImmutableList.of(); @@ -488,12 +486,12 @@ public class CompressDecompressTester future = null; private AtomicBoolean hasError = null; - private static final Logger LOG = Logger.getLogger(SourceUpdater.class); + private static final Logger LOG = LoggerFactory.getLogger(SourceUpdater.class); public SourceUpdater(MetricsSourceAdapter sourceAdapter, AtomicBoolean err) { @@ -263,7 +264,7 @@ public class TestMetricsSourceAdapter { } catch (Exception e) { // catch all errors hasError.set(true); - LOG.error(e.getStackTrace()); + LOG.error("Something went wrong.", e); } finally { if (hasError.get()) { LOG.error("Hit error, stopping now"); @@ -284,7 +285,7 @@ public class TestMetricsSourceAdapter { private int cnt = 0; private ScheduledFuture future = null; private AtomicBoolean hasError = null; - private static final Logger LOG = Logger.getLogger(SourceReader.class); + private static final Logger LOG = LoggerFactory.getLogger(SourceReader.class); public SourceReader( TestMetricsSource source, MetricsSourceAdapter sourceAdapter, @@ -318,7 +319,7 @@ public class TestMetricsSourceAdapter { } catch (Exception e) { // catch other errors hasError.set(true); - LOG.error(e.getStackTrace()); + LOG.error("Something went wrong.", e); } finally { if (hasError.get()) { future.cancel(false); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java index 8c1339d38d5..b1399712e66 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java @@ -22,7 +22,7 @@ import java.util.List; import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell.ExitCodeException; @@ -41,8 +41,8 @@ public class TestShellBasedUnixGroupsMapping { private static final Logger TESTLOG = LoggerFactory.getLogger(TestShellBasedUnixGroupsMapping.class); - private final GenericTestUtils.LogCapturer shellMappingLog = - GenericTestUtils.LogCapturer.captureLogs( + private final LogCapturer shellMappingLog = + LogCapturer.captureLogs( ShellBasedUnixGroupsMapping.LOG); private class TestGroupUserNotExist diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java index a0ce721ecf0..6a6fff89c16 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java @@ -19,6 +19,8 @@ package org.apache.hadoop.security.ssl; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; + import org.junit.BeforeClass; import org.junit.Test; @@ -42,7 +44,7 @@ public class TestReloadingX509KeyManager { private static final String BASEDIR = GenericTestUtils.getTempPath( TestReloadingX509TrustManager.class.getSimpleName()); - private final GenericTestUtils.LogCapturer reloaderLog = GenericTestUtils.LogCapturer.captureLogs( + private final LogCapturer reloaderLog = LogCapturer.captureLogs( FileMonitoringTimerTask.LOG); @BeforeClass diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java index 63589592f35..8d2a4c78f5f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java @@ -19,7 +19,7 @@ package org.apache.hadoop.security.ssl; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import java.util.function.Supplier; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java index b7b86b7aa0d..839c51c5e10 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java @@ -18,7 +18,7 @@ package org.apache.hadoop.service; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; @@ -29,7 +29,7 @@ import org.slf4j.LoggerFactory; import java.io.PrintWriter; -import static 
org.apache.hadoop.test.GenericTestUtils.LogCapturer.captureLogs; +import static org.apache.hadoop.logging.LogCapturer.captureLogs; import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.times; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java index e54971e491c..825fc706f49 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java @@ -25,7 +25,6 @@ import java.io.IOException; import java.io.InputStreamReader; import java.io.OutputStream; import java.io.PrintStream; -import java.io.StringWriter; import java.lang.management.ManagementFactory; import java.lang.management.ThreadInfo; import java.lang.management.ThreadMXBean; @@ -38,7 +37,6 @@ import java.util.Locale; import java.util.Objects; import java.util.Random; import java.util.Set; -import java.util.Enumeration; import java.util.TreeSet; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; @@ -53,17 +51,11 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.util.BlockingThreadPoolExecutorService; import org.apache.hadoop.util.DurationInfo; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; -import org.apache.log4j.Appender; -import org.apache.log4j.Layout; -import org.apache.log4j.Level; -import org.apache.log4j.LogManager; -import org.apache.log4j.Logger; -import org.apache.log4j.PatternLayout; -import org.apache.log4j.WriterAppender; import org.junit.Assert; import org.junit.Assume; import org.mockito.invocation.InvocationOnMock; @@ -115,51 +107,17 @@ public abstract class GenericTestUtils { public static final String ERROR_INVALID_ARGUMENT = "Total wait time should be greater than check interval time"; - @Deprecated - public static Logger toLog4j(org.slf4j.Logger logger) { - return LogManager.getLogger(logger.getName()); - } - - /** - * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead - */ - @Deprecated - public static void disableLog(Logger logger) { - logger.setLevel(Level.OFF); - } - public static void disableLog(org.slf4j.Logger logger) { - disableLog(toLog4j(logger)); - } - - public static void setLogLevel(Logger logger, Level level) { - logger.setLevel(level); - } - - /** - * @deprecated - * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead - */ - @Deprecated - public static void setLogLevel(org.slf4j.Logger logger, Level level) { - setLogLevel(toLog4j(logger), level); + HadoopLoggerUtils.setLogLevel(logger.getName(), "OFF"); } public static void setLogLevel(org.slf4j.Logger logger, org.slf4j.event.Level level) { - setLogLevel(toLog4j(logger), Level.toLevel(level.toString())); + HadoopLoggerUtils.setLogLevel(logger.getName(), level.toString()); } public static void setRootLogLevel(org.slf4j.event.Level level) { - setLogLevel(LogManager.getRootLogger(), Level.toLevel(level.toString())); - } - - public static void setCurrentLoggersLogLevel(org.slf4j.event.Level level) { - for (Enumeration loggers = LogManager.getCurrentLoggers(); - loggers.hasMoreElements();) { - Logger logger 
= (Logger) loggers.nextElement(); - logger.setLevel(Level.toLevel(level.toString())); - } + HadoopLoggerUtils.setLogLevel("root", level.toString()); } public static org.slf4j.event.Level toLevel(String level) { @@ -471,47 +429,6 @@ public abstract class GenericTestUtils { } } - public static class LogCapturer { - private StringWriter sw = new StringWriter(); - private WriterAppender appender; - private Logger logger; - - public static LogCapturer captureLogs(org.slf4j.Logger logger) { - if (logger.getName().equals("root")) { - return new LogCapturer(org.apache.log4j.Logger.getRootLogger()); - } - return new LogCapturer(toLog4j(logger)); - } - - public static LogCapturer captureLogs(Logger logger) { - return new LogCapturer(logger); - } - - private LogCapturer(Logger logger) { - this.logger = logger; - Appender defaultAppender = Logger.getRootLogger().getAppender("stdout"); - if (defaultAppender == null) { - defaultAppender = Logger.getRootLogger().getAppender("console"); - } - final Layout layout = (defaultAppender == null) ? new PatternLayout() : - defaultAppender.getLayout(); - this.appender = new WriterAppender(layout, sw); - logger.addAppender(this.appender); - } - - public String getOutput() { - return sw.toString(); - } - - public void stopCapturing() { - logger.removeAppender(appender); - } - - public void clearOutput() { - sw.getBuffer().setLength(0); - } - } - /** * Mockito answer helper that triggers one latch as soon as the * method is called, then waits on another before continuing. diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java index 8489e3d24f3..f6f4a448e0e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java @@ -26,6 +26,8 @@ import org.slf4j.LoggerFactory; import java.util.function.Supplier; import org.slf4j.event.Level; +import org.apache.hadoop.logging.LogCapturer; + import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java index 98e182236c9..8375864e5fd 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java @@ -22,8 +22,8 @@ import java.io.File; import org.junit.Assert; -import org.apache.log4j.Logger; import org.junit.Test; +import org.slf4j.Logger; public class TestClassUtil { @Test(timeout=10000) @@ -35,6 +35,6 @@ public class TestClassUtil { Assert.assertTrue("Containing jar does not exist on file system ", jarFile.exists()); Assert.assertTrue("Incorrect jar file " + containingJar, - jarFile.getName().matches("log4j.*[.]jar")); + jarFile.getName().matches("slf4j-api.*[.]jar")); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java index 1d1ce893a97..ec26af66017 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java @@ -28,7 +28,7 @@ import java.util.List; import static org.junit.Assert.*; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.assertj.core.api.Assertions; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java index f43930dd07a..fb6221f2704 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java @@ -28,10 +28,12 @@ import java.util.Iterator; import java.util.Random; import org.junit.Assert; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.util.hash.Hash; -import org.apache.log4j.Logger; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; @@ -113,7 +115,7 @@ public class BloomFilterCommonTester { } interface FilterTesterStrategy { - final Logger logger = Logger.getLogger(FilterTesterStrategy.class); + Logger logger = LoggerFactory.getLogger(FilterTesterStrategy.class); void assertWhat(Filter filter, int numInsertions, int hashId, ImmutableSet falsePositives); diff --git a/hadoop-common-project/hadoop-kms/pom.xml b/hadoop-common-project/hadoop-kms/pom.xml index 96588a22b94..8a04c4ebcfb 100644 --- a/hadoop-common-project/hadoop-kms/pom.xml +++ b/hadoop-common-project/hadoop-kms/pom.xml @@ -53,6 +53,12 @@ hadoop-auth compile + + org.apache.hadoop + hadoop-logging + test + test-jar + org.apache.hadoop.thirdparty hadoop-shaded-guava diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java index f4c7fbe0b3c..97d854285ff 100644 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java @@ -49,6 +49,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.util.Time; import org.apache.http.client.utils.URIBuilder; import org.junit.After; @@ -583,8 +584,8 @@ public class TestKMS { @Test public void testStartStopHttpPseudo() throws Exception { // Make sure bogus errors don't get emitted. 
- GenericTestUtils.LogCapturer logs = - GenericTestUtils.LogCapturer.captureLogs(LoggerFactory.getLogger( + LogCapturer logs = + LogCapturer.captureLogs(LoggerFactory.getLogger( "com.sun.jersey.server.wadl.generators.AbstractWadlGeneratorGrammarGenerator")); try { testStartStop(false, false); diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java index 3d0fd7de642..6e12d946ff3 100644 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java @@ -18,23 +18,24 @@ package org.apache.hadoop.crypto.key.kms.server; import java.io.ByteArrayOutputStream; +import java.io.File; import java.io.FilterOutputStream; -import java.io.InputStream; import java.io.IOException; import java.io.OutputStream; import java.io.PrintStream; +import java.net.URISyntaxException; +import java.net.URL; +import java.nio.file.Paths; import java.util.List; import java.util.concurrent.TimeUnit; import org.apache.commons.lang3.reflect.FieldUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.kms.server.KMS.KMSOp; -import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.ThreadUtil; -import org.apache.log4j.LogManager; -import org.apache.log4j.PropertyConfigurator; + import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -67,24 +68,23 @@ public class TestKMSAudit { public final Timeout testTimeout = new Timeout(180000L, TimeUnit.MILLISECONDS); @Before - public void setUp() throws IOException { + public void setUp() throws IOException, URISyntaxException { originalOut = System.err; memOut = new ByteArrayOutputStream(); filterOut = new FilterOut(memOut); capturedOut = new PrintStream(filterOut); System.setErr(capturedOut); - InputStream is = - ThreadUtil.getResourceAsStream("log4j-kmsaudit.properties"); - PropertyConfigurator.configure(is); - IOUtils.closeStream(is); + URL url = getClass().getClassLoader().getResource("log4j-kmsaudit.properties"); + File file = Paths.get(url.toURI()).toFile(); + HadoopLoggerUtils.updateLog4jConfiguration(KMSAudit.class, file.getAbsolutePath()); Configuration conf = new Configuration(); this.kmsAudit = new KMSAudit(conf); } @After - public void cleanUp() { + public void cleanUp() throws Exception { System.setErr(originalOut); - LogManager.resetConfiguration(); + HadoopLoggerUtils.resetConfiguration(); kmsAudit.shutdown(); } diff --git a/hadoop-common-project/hadoop-logging/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-logging/dev-support/findbugsExcludeFile.xml new file mode 100644 index 00000000000..304d1e45157 --- /dev/null +++ b/hadoop-common-project/hadoop-logging/dev-support/findbugsExcludeFile.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + + + + diff --git a/hadoop-common-project/hadoop-logging/pom.xml b/hadoop-common-project/hadoop-logging/pom.xml new file mode 100644 index 00000000000..20af2bee769 --- /dev/null +++ b/hadoop-common-project/hadoop-logging/pom.xml @@ -0,0 +1,125 @@ + + + + + + hadoop-project + org.apache.hadoop + 3.4.0-SNAPSHOT + ../../hadoop-project + + 4.0.0 + + hadoop-logging + 3.4.0-SNAPSHOT + 
jar + + Apache Hadoop Logging + Logging Support for Apache Hadoop project + + + UTF-8 + + + + + org.apache.hadoop + hadoop-annotations + provided + + + org.apache.commons + commons-lang3 + + + org.slf4j + slf4j-api + + + junit + junit + test + + + org.slf4j + slf4j-log4j12 + test + + + log4j + log4j + provided + + + + + + + org.apache.maven.plugins + maven-source-plugin + + + prepare-package + + jar + + + + + true + + + + org.apache.maven.plugins + maven-jar-plugin + + + prepare-jar + prepare-package + + jar + + + + prepare-test-jar + prepare-package + + test-jar + + + + + + org.apache.rat + apache-rat-plugin + + + dev-support/findbugsExcludeFile.xml + + + + + com.github.spotbugs + spotbugs-maven-plugin + + ${basedir}/dev-support/findbugsExcludeFile.xml + + + + + + \ No newline at end of file diff --git a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopInternalLog4jUtils.java b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopInternalLog4jUtils.java new file mode 100644 index 00000000000..b0bd2e31fcd --- /dev/null +++ b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopInternalLog4jUtils.java @@ -0,0 +1,145 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.logging; + +import java.io.FileInputStream; +import java.io.Flushable; +import java.io.IOException; +import java.io.InputStream; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.util.Enumeration; +import java.util.Map; +import java.util.Properties; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.log4j.Appender; +import org.apache.log4j.Level; +import org.apache.log4j.LogManager; +import org.apache.log4j.Logger; +import org.apache.log4j.PropertyConfigurator; + +/** + * Hadoop's internal class that access log4j APIs directly. + *
<p/>
+ * This class will depend on log4j directly, so callers should not use this class directly to avoid + * introducing log4j dependencies to downstream users. Please call the methods in + * {@link HadoopLoggerUtils}, as they will call the methods here through reflection. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +final class HadoopInternalLog4jUtils { + + private HadoopInternalLog4jUtils() { + } + + static void setLogLevel(String loggerName, String levelName) { + if (loggerName == null) { + throw new IllegalArgumentException("logger name cannot be null"); + } + Logger logger = loggerName.equalsIgnoreCase("root") ? + LogManager.getRootLogger() : + LogManager.getLogger(loggerName); + Level level = Level.toLevel(levelName.toUpperCase()); + if (!level.toString().equalsIgnoreCase(levelName)) { + throw new IllegalArgumentException("Unsupported log level " + levelName); + } + logger.setLevel(level); + } + + static void shutdownLogManager() { + LogManager.shutdown(); + } + + static String getEffectiveLevel(String loggerName) { + Logger logger = loggerName.equalsIgnoreCase("root") ? + LogManager.getRootLogger() : + LogManager.getLogger(loggerName); + return logger.getEffectiveLevel().toString(); + } + + static void resetConfiguration() { + LogManager.resetConfiguration(); + } + + static void updateLog4jConfiguration(Class targetClass, String log4jPath) throws Exception { + Properties customProperties = new Properties(); + try (FileInputStream fs = new FileInputStream(log4jPath); + InputStream is = targetClass.getResourceAsStream("/log4j.properties")) { + customProperties.load(fs); + Properties originalProperties = new Properties(); + originalProperties.load(is); + for (Map.Entry entry : customProperties.entrySet()) { + originalProperties.setProperty(entry.getKey().toString(), entry.getValue().toString()); + } + LogManager.resetConfiguration(); + PropertyConfigurator.configure(originalProperties); + } + } + + static boolean hasAppenders(String logger) { + return Logger.getLogger(logger) + .getAllAppenders() + .hasMoreElements(); + } + + @SuppressWarnings("unchecked") + static void syncLogs() { + // flush standard streams + // + System.out.flush(); + System.err.flush(); + + // flush flushable appenders + // + final Logger rootLogger = Logger.getRootLogger(); + flushAppenders(rootLogger); + final Enumeration allLoggers = rootLogger.getLoggerRepository(). + getCurrentLoggers(); + while (allLoggers.hasMoreElements()) { + final Logger l = allLoggers.nextElement(); + flushAppenders(l); + } + } + + @SuppressWarnings("unchecked") + private static void flushAppenders(Logger l) { + final Enumeration allAppenders = l.getAllAppenders(); + while (allAppenders.hasMoreElements()) { + final Appender a = allAppenders.nextElement(); + if (a instanceof Flushable) { + try { + ((Flushable) a).flush(); + } catch (IOException ioe) { + System.err.println(a + ": Failed to flush!" 
+ + stringifyException(ioe)); + } + } + } + } + + private static String stringifyException(Throwable e) { + StringWriter stringWriter = new StringWriter(); + PrintWriter printWriter = new PrintWriter(stringWriter); + e.printStackTrace(printWriter); + printWriter.close(); + return stringWriter.toString(); + } + +} diff --git a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopLoggerUtils.java b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopLoggerUtils.java new file mode 100644 index 00000000000..1d0bea17337 --- /dev/null +++ b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopLoggerUtils.java @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.logging; + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * A bridge class for operating on logging framework, such as changing log4j log level, etc. + * Will call the methods in {@link HadoopInternalLog4jUtils} to perform operations on log4j level. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public final class HadoopLoggerUtils { + + private static final String INTERNAL_UTILS_CLASS = + "org.apache.hadoop.logging.HadoopInternalLog4jUtils"; + + private HadoopLoggerUtils() { + } + + private static Method getMethod(String methodName, Class... 
args) { + try { + Class clazz = Class.forName(INTERNAL_UTILS_CLASS); + return clazz.getDeclaredMethod(methodName, args); + } catch (ClassNotFoundException | NoSuchMethodException e) { + throw new AssertionError("should not happen", e); + } + } + + private static void throwUnchecked(Throwable throwable) { + if (throwable instanceof RuntimeException) { + throw (RuntimeException) throwable; + } + if (throwable instanceof Error) { + throw (Error) throwable; + } + } + + public static void shutdownLogManager() { + Method method = getMethod("shutdownLogManager"); + try { + method.invoke(null); + } catch (IllegalAccessException e) { + throw new AssertionError("should not happen", e); + } catch (InvocationTargetException e) { + throwUnchecked(e.getCause()); + throw new AssertionError("Failed to execute, should not happen", e.getCause()); + } + } + + public static void setLogLevel(String loggerName, String levelName) { + Method method = getMethod("setLogLevel", String.class, String.class); + try { + method.invoke(null, loggerName, levelName); + } catch (IllegalAccessException e) { + throw new AssertionError("should not happen", e); + } catch (InvocationTargetException e) { + throwUnchecked(e.getCause()); + throw new AssertionError("Failed to execute, should not happen", e.getCause()); + } + } + + public static String getEffectiveLevel(String loggerName) { + Method method = getMethod("getEffectiveLevel", String.class); + try { + return (String) method.invoke(null, loggerName); + } catch (IllegalAccessException e) { + throw new AssertionError("should not happen", e); + } catch (InvocationTargetException e) { + throwUnchecked(e.getCause()); + throw new AssertionError("Failed to execute, should not happen", e.getCause()); + } + } + + public static void resetConfiguration() { + Method method = getMethod("resetConfiguration"); + try { + method.invoke(null); + } catch (IllegalAccessException e) { + throw new AssertionError("should not happen", e); + } catch (InvocationTargetException e) { + throwUnchecked(e.getCause()); + throw new AssertionError("Failed to execute, should not happen", e.getCause()); + } + } + + public static void updateLog4jConfiguration(Class targetClass, String log4jPath) { + Method method = getMethod("updateLog4jConfiguration", Class.class, String.class); + try { + method.invoke(null, targetClass, log4jPath); + } catch (IllegalAccessException e) { + throw new AssertionError("should not happen", e); + } catch (InvocationTargetException e) { + throwUnchecked(e.getCause()); + throw new AssertionError("Failed to execute, should not happen", e.getCause()); + } + } + + public static boolean hasAppenders(String logger) { + Method method = getMethod("hasAppenders", String.class); + try { + return (Boolean) method.invoke(null, logger); + } catch (IllegalAccessException e) { + throw new AssertionError("should not happen", e); + } catch (InvocationTargetException e) { + throwUnchecked(e.getCause()); + throw new AssertionError("Failed to execute, should not happen", e.getCause()); + } + } + + public synchronized static void syncLogs() { + Method method = getMethod("syncLogs"); + try { + method.invoke(null); + } catch (IllegalAccessException e) { + throw new AssertionError("should not happen", e); + } catch (InvocationTargetException e) { + throwUnchecked(e.getCause()); + throw new AssertionError("Failed to execute, should not happen", e.getCause()); + } + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AsyncRFAAppender.java 
b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/AsyncRFAAppender.java similarity index 98% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AsyncRFAAppender.java rename to hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/AsyncRFAAppender.java index 276e5b0987a..2abfffb474b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AsyncRFAAppender.java +++ b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/AsyncRFAAppender.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hdfs.util; +package org.apache.hadoop.logging.appenders; import java.io.IOException; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/Log4jWarningErrorMetricsAppender.java similarity index 93% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java rename to hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/Log4jWarningErrorMetricsAppender.java index fffc8a857cb..cf7a2bfe0d9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java +++ b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/Log4jWarningErrorMetricsAppender.java @@ -16,12 +16,10 @@ * limitations under the License. */ -package org.apache.hadoop.yarn.util; +package org.apache.hadoop.logging.appenders; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.Time; import org.apache.log4j.AppenderSkeleton; import org.apache.log4j.Level; import org.apache.log4j.Logger; @@ -113,16 +111,13 @@ public class Log4jWarningErrorMetricsAppender extends AppenderSkeleton { /** * Create an appender to keep track of the errors and warnings logged by the * system. - * - * @param cleanupIntervalSeconds - * the interval at which old messages are purged to prevent the - * message stores from growing unbounded - * @param messageAgeLimitSeconds - * the maximum age of a message in seconds before it is purged from - * the store - * @param maxUniqueMessages - * the maximum number of unique messages of each type we keep before - * we start purging + * + * @param cleanupIntervalSeconds the interval at which old messages are purged to prevent the + * message stores from growing unbounded. + * @param messageAgeLimitSeconds the maximum age of a message in seconds before it is purged from + * the store. + * @param maxUniqueMessages the maximum number of unique messages of each type we keep before + * we start purging. 
*/ public Log4jWarningErrorMetricsAppender(int cleanupIntervalSeconds, long messageAgeLimitSeconds, int maxUniqueMessages) { @@ -143,6 +138,20 @@ public class Log4jWarningErrorMetricsAppender extends AppenderSkeleton { this.setThreshold(Level.WARN); } + private static String join(CharSequence separator, String[] strings) { + StringBuilder sb = new StringBuilder(); + boolean first = true; + for (String s : strings) { + if (first) { + first = false; + } else { + sb.append(separator); + } + sb.append(s); + } + return sb.toString(); + } + /** * {@inheritDoc} */ @@ -151,7 +160,7 @@ public class Log4jWarningErrorMetricsAppender extends AppenderSkeleton { String message = event.getRenderedMessage(); String[] throwableStr = event.getThrowableStrRep(); if (throwableStr != null) { - message = message + "\n" + StringUtils.join("\n", throwableStr); + message = message + "\n" + join("\n", throwableStr); message = org.apache.commons.lang3.StringUtils.left(message, MAX_MESSAGE_SIZE); } @@ -232,7 +241,7 @@ public class Log4jWarningErrorMetricsAppender extends AppenderSkeleton { * getErrorMessagesAndCounts since the message store is purged at regular * intervals to prevent it from growing without bounds, while the store for * the counts is purged less frequently. - * + * * @param cutoffs * list of timestamp cutoffs(in seconds) for which the counts are * desired @@ -248,7 +257,7 @@ public class Log4jWarningErrorMetricsAppender extends AppenderSkeleton { * getWarningMessagesAndCounts since the message store is purged at regular * intervals to prevent it from growing without bounds, while the store for * the counts is purged less frequently. - * + * * @param cutoffs * list of timestamp cutoffs(in seconds) for which the counts are * desired @@ -285,7 +294,7 @@ public class Log4jWarningErrorMetricsAppender extends AppenderSkeleton { * differ from the ones provided by getErrorCounts since the message store is * purged at regular intervals to prevent it from growing without bounds, * while the store for the counts is purged less frequently. - * + * * @param cutoffs * list of timestamp cutoffs(in seconds) for which the counts are * desired @@ -304,7 +313,7 @@ public class Log4jWarningErrorMetricsAppender extends AppenderSkeleton { * may differ from the ones provided by getWarningCounts since the message * store is purged at regular intervals to prevent it from growing without * bounds, while the store for the counts is purged less frequently. 
- * + * * @param cutoffs * list of timestamp cutoffs(in seconds) for which the counts are * desired @@ -322,7 +331,7 @@ SortedSet<PurgeElement> purgeInformation) { if (purgeInformation.size() > maxUniqueMessages) { ErrorAndWarningsCleanup cleanup = new ErrorAndWarningsCleanup(); - long cutoff = Time.now() - (messageAgeLimitSeconds * 1000); + long cutoff = System.currentTimeMillis() - (messageAgeLimitSeconds * 1000); cutoff = (cutoff / 1000); cleanup.cleanupMessages(map, purgeInformation, cutoff, maxUniqueMessages); } @@ -379,7 +388,7 @@ @Override public void run() { - long cutoff = Time.now() - (messageAgeLimitSeconds * 1000); + long cutoff = System.currentTimeMillis() - (messageAgeLimitSeconds * 1000); cutoff = (cutoff / 1000); cleanupMessages(errors, errorsPurgeInformation, cutoff, maxUniqueMessages); cleanupMessages(warnings, warningsPurgeInformation, cutoff, diff --git a/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/LogCapturer.java b/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/LogCapturer.java new file mode 100644 index 00000000000..45f5d0ca02d --- /dev/null +++ b/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/LogCapturer.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.logging; + +import java.io.StringWriter; + +import org.apache.log4j.Appender; +import org.apache.log4j.Layout; +import org.apache.log4j.LogManager; +import org.apache.log4j.Logger; +import org.apache.log4j.PatternLayout; +import org.apache.log4j.WriterAppender; + +public class LogCapturer { + private final StringWriter sw = new StringWriter(); + private final Appender appender; + private final Logger logger; + + public static LogCapturer captureLogs(org.slf4j.Logger logger) { + if (logger.getName().equals("root")) { + return new LogCapturer(Logger.getRootLogger()); + } + return new LogCapturer(LogManager.getLogger(logger.getName())); + } + + private LogCapturer(Logger logger) { + this.logger = logger; + Appender defaultAppender = Logger.getRootLogger().getAppender("stdout"); + if (defaultAppender == null) { + defaultAppender = Logger.getRootLogger().getAppender("console"); + } + final Layout layout = + (defaultAppender == null) ?
new PatternLayout() : defaultAppender.getLayout(); + this.appender = new WriterAppender(layout, sw); + logger.addAppender(this.appender); + } + + public String getOutput() { + return sw.toString(); + } + + public void stopCapturing() { + logger.removeAppender(appender); + } + + public void clearOutput() { + sw.getBuffer().setLength(0); + } +} diff --git a/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/test/TestSyncLogs.java b/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/test/TestSyncLogs.java new file mode 100644 index 00000000000..4bafb5a3153 --- /dev/null +++ b/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/test/TestSyncLogs.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.logging.test; + +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.logging.HadoopLoggerUtils; + +public class TestSyncLogs { + + private static final Logger LOG = LoggerFactory.getLogger(TestSyncLogs.class); + + @Test + public void testSyncLogs() { + LOG.info("Testing log sync"); + HadoopLoggerUtils.syncLogs(); + } + +} diff --git a/hadoop-common-project/hadoop-logging/src/test/resources/log4j.properties b/hadoop-common-project/hadoop-logging/src/test/resources/log4j.properties new file mode 100644 index 00000000000..ff1468cf43e --- /dev/null +++ b/hadoop-common-project/hadoop-logging/src/test/resources/log4j.properties @@ -0,0 +1,18 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# log4j configuration used during build and unit tests + +log4j.rootLogger=debug,stdout +log4j.threshold=ALL +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n diff --git a/hadoop-common-project/hadoop-minikdc/pom.xml b/hadoop-common-project/hadoop-minikdc/pom.xml index c292aebbe36..d2e993343a2 100644 --- a/hadoop-common-project/hadoop-minikdc/pom.xml +++ b/hadoop-common-project/hadoop-minikdc/pom.xml @@ -38,11 +38,6 @@ <groupId>org.apache.kerby</groupId> <artifactId>kerb-simplekdc</artifactId> </dependency> - <dependency> - <groupId>org.slf4j</groupId> - <artifactId>slf4j-log4j12</artifactId> - <scope>compile</scope> - </dependency> <dependency> <groupId>junit</groupId> <artifactId>junit</artifactId> diff --git a/hadoop-common-project/pom.xml b/hadoop-common-project/pom.xml index f167a079a9b..b0fb88874c9 100644 --- a/hadoop-common-project/pom.xml +++ b/hadoop-common-project/pom.xml @@ -38,6 +38,7 @@ <module>hadoop-minikdc</module> <module>hadoop-kms</module> <module>hadoop-registry</module> + <module>hadoop-logging</module> diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml index b362e001ea6..9a1226ea385 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml @@ -86,6 +86,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> <artifactId>netty-all</artifactId> <scope>test</scope> </dependency> + <dependency> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-logging</artifactId> + <scope>test</scope> + <type>test-jar</type> + </dependency> <dependency> <groupId>org.mock-server</groupId> <artifactId>mockserver-netty</artifactId> diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java index 1fe6dcad932..d0b86534269 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java @@ -31,6 +31,7 @@ import static org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory.SSL_MONIT import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.util.Lists; import org.junit.Assert; import org.junit.Test; @@ -61,8 +62,8 @@ public final class TestURLConnectionFactory { public void testSSLInitFailure() throws Exception { Configuration conf = new Configuration(); conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "foo"); - GenericTestUtils.LogCapturer logs = - GenericTestUtils.LogCapturer.captureLogs( + LogCapturer logs = + LogCapturer.captureLogs( LoggerFactory.getLogger(URLConnectionFactory.class)); URLConnectionFactory.newDefaultURLConnectionFactory(conf); Assert.assertTrue("Expected log for ssl init failure not found!", diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml index a5bf5c1c318..b9aae62bd81 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml @@ -182,6 +182,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> <artifactId>junit-jupiter-params</artifactId> <scope>test</scope> </dependency> + <dependency> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-logging</artifactId> + <scope>test</scope> + <type>test-jar</type> + </dependency> diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java index 0741f1aed44..9f74337d7ae 100644 ---
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys; import org.apache.hadoop.hdfs.server.federation.router.RemoteMethod; import org.apache.hadoop.hdfs.server.federation.router.RouterRpcClient; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FAIR_HANDLER_COUNT_KEY_PREFIX; import static org.junit.Assert.assertEquals; @@ -48,8 +49,8 @@ public class TestRouterRefreshFairnessPolicyController { private static final Logger LOG = LoggerFactory.getLogger(TestRouterRefreshFairnessPolicyController.class); - private final GenericTestUtils.LogCapturer controllerLog = - GenericTestUtils.LogCapturer.captureLogs(AbstractRouterRpcFairnessPolicyController.LOG); + private final LogCapturer controllerLog = + LogCapturer.captureLogs(AbstractRouterRpcFairnessPolicyController.LOG); private StateStoreDFSCluster cluster; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java index 1f5770b1dda..d4f68271350 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java @@ -22,7 +22,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.server.federation.router.FederationUtil; import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys; -import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.util.Time; import org.junit.Test; import org.slf4j.LoggerFactory; @@ -179,7 +179,7 @@ public class TestRouterRpcFairnessPolicyController { private void verifyInstantiationError(Configuration conf, int handlerCount, int totalDedicatedHandlers) { - GenericTestUtils.LogCapturer logs = GenericTestUtils.LogCapturer + LogCapturer logs = LogCapturer .captureLogs(LoggerFactory.getLogger( StaticRouterRpcFairnessPolicyController.class)); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java index 9ee9692aad1..bb81eaa070b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java @@ -40,7 +40,6 @@ import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; -import 
org.apache.hadoop.hdfs.LogVerificationAppender; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder; @@ -55,6 +54,7 @@ import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver; import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; import org.apache.hadoop.http.HttpConfig; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; @@ -322,11 +322,7 @@ public class TestRouterNamenodeMonitoring { int httpsRequests, int requestsPerService) { // Attach our own log appender so we can verify output - final LogVerificationAppender appender = - new LogVerificationAppender(); - final org.apache.log4j.Logger logger = - org.apache.log4j.Logger.getRootLogger(); - logger.addAppender(appender); + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); GenericTestUtils.setRootLogLevel(Level.DEBUG); // Setup and start the Router @@ -347,8 +343,11 @@ public class TestRouterNamenodeMonitoring { heartbeatService.getNamenodeStatusReport(); } } - assertEquals(httpsRequests * 2, appender.countLinesWithMessage("JMX URL: https://")); - assertEquals(httpRequests * 2, appender.countLinesWithMessage("JMX URL: http://")); + assertEquals(2, org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(), + "JMX URL: https://")); + assertEquals(2, org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(), + "JMX URL: http://")); + logCapturer.stopCapturing(); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java index d3d34216190..3db20a6e180 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java @@ -135,6 +135,8 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.Service.STATE; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; +import org.apache.hadoop.logging.LogCapturer; + import org.codehaus.jettison.json.JSONException; import org.codehaus.jettison.json.JSONObject; import org.junit.AfterClass; @@ -2067,8 +2069,8 @@ public class TestRouterRpc { @Test public void testMkdirsWithCallerContext() throws IOException { - GenericTestUtils.LogCapturer auditlog = - GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); + LogCapturer auditlog = + LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); // Current callerContext is null assertNull(CallerContext.getCurrent()); @@ -2094,8 +2096,8 @@ public class TestRouterRpc { @Test public void testRealUserPropagationInCallerContext() throws IOException, InterruptedException { - GenericTestUtils.LogCapturer auditlog = - GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); + LogCapturer auditlog = + LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); // Current callerContext is null assertNull(CallerContext.getCurrent()); @@ -2139,8 +2141,8 @@ public class TestRouterRpc { @Test 
public void testAddClientIpPortToCallerContext() throws IOException { - GenericTestUtils.LogCapturer auditLog = - GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); + LogCapturer auditLog = + LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); // 1. ClientIp and ClientPort are not set on the client. // Set client context. @@ -2174,8 +2176,8 @@ public class TestRouterRpc { @Test public void testAddClientIdAndCallIdToCallerContext() throws IOException { - GenericTestUtils.LogCapturer auditLog = - GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); + LogCapturer auditLog = + LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); // 1. ClientId and ClientCallId are not set on the client. // Set client context. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java index 336ea391385..caecb697d6d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java @@ -72,6 +72,8 @@ import org.apache.hadoop.ipc.CallerContext; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; + import org.junit.Test; import org.slf4j.event.Level; @@ -276,12 +278,10 @@ public class TestRouterRpcMultiDestination extends TestRouterRpc { @Test public void testPreviousBlockNotNull() throws IOException, URISyntaxException { - final GenericTestUtils.LogCapturer stateChangeLog = - GenericTestUtils.LogCapturer.captureLogs(NameNode.stateChangeLog); + final LogCapturer stateChangeLog = LogCapturer.captureLogs(NameNode.stateChangeLog); GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.DEBUG); - final GenericTestUtils.LogCapturer nameNodeLog = - GenericTestUtils.LogCapturer.captureLogs(NameNode.LOG); + final LogCapturer nameNodeLog = LogCapturer.captureLogs(NameNode.LOG); GenericTestUtils.setLogLevel(NameNode.LOG, Level.DEBUG); final FederationRPCMetrics metrics = getRouterContext(). 
@@ -454,8 +454,8 @@ public class TestRouterRpcMultiDestination extends TestRouterRpc { @Test public void testCallerContextWithMultiDestinations() throws IOException { - GenericTestUtils.LogCapturer auditLog = - GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); + LogCapturer auditLog = + LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); // set client context CallerContext.setCurrent( diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml index 8632c567aa1..5c2df9acf4e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml @@ -310,14 +310,4 @@ - - - - - - diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 5f156499ee0..a8922cbcff3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -164,6 +164,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> <artifactId>hadoop-minikdc</artifactId> <scope>test</scope> </dependency> + <dependency> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-logging</artifactId> + <scope>test</scope> + <type>test-jar</type> + </dependency> <dependency> <groupId>org.mockito</groupId> <artifactId>mockito-core</artifactId> diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java index 21c01cebd40..a361a280e3e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java @@ -31,6 +31,8 @@ import javax.management.ObjectName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.metrics2.util.MBeans; /** @@ -111,11 +113,8 @@ public class MetricsLoggerTask implements Runnable { .substring(0, maxLogLineLength) + "..."); } - // TODO : hadoop-logging module to hide log4j implementation details, this method - // can directly call utility from hadoop-logging.
private static boolean hasAppenders(Logger logger) { - return org.apache.log4j.Logger.getLogger(logger.getName()).getAllAppenders() - .hasMoreElements(); + return HadoopLoggerUtils.hasAppenders(logger.getName()); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java index ab301104f2e..4e8daf319a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java @@ -32,11 +32,11 @@ import org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics; import org.apache.hadoop.hdfs.server.namenode.visitor.INodeCountVisitor; import org.apache.hadoop.hdfs.server.namenode.visitor.INodeCountVisitor.Counts; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.util.GSet; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.apache.log4j.Level; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -110,13 +110,13 @@ public class FsImageValidation { } static void initLogLevels() { - Util.setLogLevel(FSImage.class, Level.TRACE); - Util.setLogLevel(FileJournalManager.class, Level.TRACE); + Util.setLogLevel(FSImage.class, "TRACE"); + Util.setLogLevel(FileJournalManager.class, "TRACE"); - Util.setLogLevel(GSet.class, Level.OFF); - Util.setLogLevel(BlockManager.class, Level.OFF); - Util.setLogLevel(DatanodeManager.class, Level.OFF); - Util.setLogLevel(TopMetrics.class, Level.OFF); + Util.setLogLevel(GSet.class, "OFF"); + Util.setLogLevel(BlockManager.class, "OFF"); + Util.setLogLevel(DatanodeManager.class, "OFF"); + Util.setLogLevel(TopMetrics.class, "OFF"); } static class Util { @@ -127,11 +127,10 @@ public class FsImageValidation { + ", max=" + StringUtils.byteDesc(runtime.maxMemory()); } - static void setLogLevel(Class<?> clazz, Level level) { - final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(clazz); - logger.setLevel(level); + static void setLogLevel(Class<?> clazz, String level) { + HadoopLoggerUtils.setLogLevel(clazz.getName(), level); LOG.info("setLogLevel {} to {}, getEffectiveLevel() = {}", clazz.getName(), level, - logger.getEffectiveLevel()); + HadoopLoggerUtils.getEffectiveLevel(clazz.getName())); } static String toCommaSeparatedNumber(long n) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java deleted file mode 100644 index 10ef47bbbc3..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs; - -import java.util.ArrayList; -import java.util.List; - -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.spi.LoggingEvent; -import org.apache.log4j.spi.ThrowableInformation; - -/** - * Used to verify that certain exceptions or messages are present in log output. - */ -public class LogVerificationAppender extends AppenderSkeleton { - private final List<LoggingEvent> log = new ArrayList<LoggingEvent>(); - - @Override - public boolean requiresLayout() { - return false; - } - - @Override - protected void append(final LoggingEvent loggingEvent) { - log.add(loggingEvent); - } - - @Override - public void close() { - } - - public List<LoggingEvent> getLog() { - return new ArrayList<LoggingEvent>(log); - } - - public int countExceptionsWithMessage(final String text) { - int count = 0; - for (LoggingEvent e: getLog()) { - ThrowableInformation t = e.getThrowableInformation(); - if (t != null) { - String m = t.getThrowable().getMessage(); - if (m.contains(text)) { - count++; - } - } - } - return count; - } - - public int countLinesWithMessage(final String text) { - int count = 0; - for (LoggingEvent e: getLog()) { - String msg = e.getRenderedMessage(); - if (msg != null && msg.contains(text)) { - count++; - } - } - return count; - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java index b16f0237b1e..75ad5bd862f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java @@ -33,7 +33,8 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; -import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; + import org.junit.Test; public class TestDFSRename { @@ -189,8 +190,8 @@ public class TestDFSRename { final DistributedFileSystem dfs = cluster.getFileSystem(); Path path = new Path("/test"); dfs.mkdirs(path); - GenericTestUtils.LogCapturer auditLog = - GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); + LogCapturer auditLog = + LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); dfs.rename(path, new Path("/dir1"), new Rename[] {Rename.OVERWRITE, Rename.TO_TRASH}); String auditOut = auditLog.getOutput(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java index 5469ebbb757..80424a388b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java @@ -45,9 +45,9 @@ import org.apache.hadoop.hdfs.server.namenode.FSImage; import
org.apache.hadoop.hdfs.server.namenode.FSImageFormat; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.hdfs.server.namenode.IllegalReservedPathException; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; -import org.apache.log4j.Logger; import org.junit.Test; import static org.junit.Assert.*; @@ -317,9 +317,7 @@ public class TestDFSUpgradeFromImage { "imageMD5Digest", "22222222222222222222222222222222"); // Attach our own log appender so we can verify output - final LogVerificationAppender appender = new LogVerificationAppender(); - final Logger logger = Logger.getRootLogger(); - logger.addAppender(appender); + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); // Upgrade should now fail try { @@ -331,9 +329,10 @@ public class TestDFSUpgradeFromImage { if (!msg.contains("Failed to load FSImage file")) { throw ioe; } - int md5failures = appender.countExceptionsWithMessage( + int md5failures = org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(), " is corrupt with MD5 checksum of "); assertEquals("Upgrade did not fail with bad MD5", 1, md5failures); + logCapturer.stopCapturing(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java index c57ef941f0e..c792386c0ed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java @@ -26,7 +26,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -48,7 +48,7 @@ public class TestDataStream { @Test(timeout = 60000) public void testDfsClient() throws IOException, InterruptedException { - LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(LoggerFactory + LogCapturer logs = LogCapturer.captureLogs(LoggerFactory .getLogger(DataStreamer.class)); byte[] toWrite = new byte[PACKET_SIZE]; new Random(1).nextBytes(toWrite); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java index f9336fcfdc7..4299c111967 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java @@ -51,7 +51,7 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.junit.After; import org.junit.Before; @@ -168,9 +168,9 @@ public class TestEncryptedTransfer { FileChecksum checksum = 
writeUnencryptedAndThenRestartEncryptedCluster(); - LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs( + LogCapturer logs = LogCapturer.captureLogs( LoggerFactory.getLogger(SaslDataTransferServer.class)); - LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs( + LogCapturer logs1 = LogCapturer.captureLogs( LoggerFactory.getLogger(DataTransferSaslUtil.class)); try { assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH)); @@ -239,7 +239,7 @@ public class TestEncryptedTransfer { Mockito.doReturn(false).when(spyClient).shouldEncryptData(); DFSClientAdapter.setDFSClient((DistributedFileSystem) fs, spyClient); - LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs( + LogCapturer logs = LogCapturer.captureLogs( LoggerFactory.getLogger(DataNode.class)); try { assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH)); @@ -457,9 +457,9 @@ public class TestEncryptedTransfer { fs = getFileSystem(conf); - LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs( + LogCapturer logs = LogCapturer.captureLogs( LoggerFactory.getLogger(SaslDataTransferServer.class)); - LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs( + LogCapturer logs1 = LogCapturer.captureLogs( LoggerFactory.getLogger(DataTransferSaslUtil.class)); try { writeTestDataToFile(fs); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java index 3dd0b7eb99e..c6561287bb0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java @@ -54,7 +54,7 @@ import org.apache.hadoop.http.HttpConfig.Policy; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.junit.After; import org.junit.Assert; import org.junit.Rule; @@ -138,7 +138,7 @@ public class TestSaslDataTransfer extends SaslDataTransferTestCase { HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf); clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, ""); - LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs( + LogCapturer logs = LogCapturer.captureLogs( LoggerFactory.getLogger(DataNode.class)); try { doTest(clientConf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java index 82b8b587694..84b7c8f224c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java @@ -30,7 +30,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.PathUtils; import 
org.junit.After; import org.junit.Before; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java index d69051c8d7a..5d2a9270640 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java @@ -56,7 +56,7 @@ import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; import org.apache.hadoop.hdfs.server.namenode.ha.ObserverReadProxyProvider; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.junit.Test; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java index d32cde83473..7e926a994f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.Whitebox; import org.assertj.core.api.Assertions; @@ -235,8 +236,8 @@ public class TestBlockManagerSafeMode { public void testCheckSafeMode9() throws Exception { Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_RECHECK_INTERVAL_KEY, 3000); - GenericTestUtils.LogCapturer logs = - GenericTestUtils.LogCapturer.captureLogs(BlockManagerSafeMode.LOG); + LogCapturer logs = + LogCapturer.captureLogs(BlockManagerSafeMode.LOG); BlockManagerSafeMode blockManagerSafeMode = new BlockManagerSafeMode(bm, fsn, true, conf); String content = logs.getOutput(); @@ -247,8 +248,8 @@ public class TestBlockManagerSafeMode { public void testCheckSafeMode10(){ Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_RECHECK_INTERVAL_KEY, -1); - GenericTestUtils.LogCapturer logs = - GenericTestUtils.LogCapturer.captureLogs(BlockManagerSafeMode.LOG); + LogCapturer logs = + LogCapturer.captureLogs(BlockManagerSafeMode.LOG); BlockManagerSafeMode blockManagerSafeMode = new BlockManagerSafeMode(bm, fsn, true, conf); String content = logs.getOutput(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java index ea7347f9e50..87c83836e78 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java @@ -58,7 +58,7 @@ import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStat import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.junit.Test; import org.mockito.Mockito; import org.slf4j.LoggerFactory; @@ -575,7 +575,7 @@ public class TestPendingReconstruction { new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); cluster.waitActive(); DFSTestUtil.setNameNodeLogLevel(Level.DEBUG); - LogCapturer logs = GenericTestUtils.LogCapturer + LogCapturer logs = LogCapturer .captureLogs(LoggerFactory.getLogger("BlockStateChange")); BlockManager bm = cluster.getNamesystem().getBlockManager(); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index 20163cc5fa5..c4b5f7aa6a0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -21,7 +21,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSI import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.any; @@ -41,6 +40,7 @@ import java.util.Set; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicLong; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.AddBlockFlag; import org.apache.hadoop.fs.ContentSummary; @@ -49,7 +49,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.LogVerificationAppender; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.TestBlockStoragePolicy; import org.apache.hadoop.hdfs.protocol.Block; @@ -67,16 +66,15 @@ import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.server.namenode.Namesystem; import org.apache.hadoop.hdfs.server.namenode.TestINodeFile; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.net.Node; import org.apache.hadoop.util.ReflectionUtils; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.apache.log4j.spi.LoggingEvent; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; +import org.slf4j.LoggerFactory; @RunWith(Parameterized.class) public class TestReplicationPolicy extends BaseReplicationPolicyTest { @@ -507,26 +505,26 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest { 2* 
HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); } - - final LogVerificationAppender appender = new LogVerificationAppender(); - final Logger logger = Logger.getRootLogger(); - logger.addAppender(appender); - + + final LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); + // try to choose NUM_OF_DATANODES which is more than actually available // nodes. DatanodeStorageInfo[] targets = chooseTarget(dataNodes.length); assertEquals(targets.length, dataNodes.length - 2); - final List<LoggingEvent> log = appender.getLog(); - assertNotNull(log); - assertFalse(log.size() == 0); - final LoggingEvent lastLogEntry = log.get(log.size() - 1); - - assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel())); - // Suppose to place replicas on each node but two data nodes are not - // available for placing replica, so here we expect a short of 2 - assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2")); - + boolean isFound = false; + for (String logLine : logCapturer.getOutput().split("\n")) { + // Supposed to place replicas on each node, but two data nodes are not + // available for placing replicas, so here we expect a shortfall of 2 + if (logLine.contains("WARN") && logLine.contains("in need of 2")) { + isFound = true; + break; + } + } + assertTrue("Could not find the block placement log specific to 2 datanodes not being " + + "available for placing replicas", isFound); + logCapturer.stopCapturing(); resetHeartbeatForStorages(); } @@ -1710,17 +1708,14 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest { @Test public void testChosenFailureForStorageType() { - final LogVerificationAppender appender = new LogVerificationAppender(); - final Logger logger = Logger.getRootLogger(); - logger.addAppender(appender); - + final LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); DatanodeStorageInfo[] targets = replicator.chooseTarget(filename, 1, dataNodes[0], new ArrayList<DatanodeStorageInfo>(), false, null, BLOCK_SIZE, TestBlockStoragePolicy.POLICY_SUITE.getPolicy( HdfsConstants.StoragePolicy.COLD.value()), null); assertEquals(0, targets.length); assertNotEquals(0, - appender.countLinesWithMessage("NO_REQUIRED_STORAGE_TYPE")); + StringUtils.countMatches(logCapturer.getOutput(), "NO_REQUIRED_STORAGE_TYPE")); } @Test diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java index 73201ba6054..13efcf783a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java @@ -27,7 +27,6 @@ import java.io.File; import java.io.IOException; import java.net.InetSocketAddress; import java.util.Collections; -import java.util.List; import java.util.Random; import java.util.concurrent.TimeoutException; @@ -39,19 +38,15 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.server.namenode.PatternMatchingAppender; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Appender; -import org.apache.log4j.AsyncAppender; import org.junit.After; import org.junit.Assert; import org.junit.Rule; import org.junit.Test; import org.junit.rules.Timeout; -import java.util.function.Supplier; - /** * Test periodic logging of DataNode metrics. */ @@ -128,13 +123,13 @@ public class TestDataNodeMetricsLogger { } @Test + @SuppressWarnings("unchecked") public void testMetricsLoggerIsAsync() throws IOException { startDNForTest(true); assertNotNull(dn); - org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME); - @SuppressWarnings("unchecked") - List<Appender> appenders = Collections.list(logger.getAllAppenders()); - assertTrue(appenders.get(0) instanceof AsyncAppender); + assertTrue(Collections.list( + org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME).getAllAppenders()) + .get(0) instanceof org.apache.log4j.AsyncAppender); } /** @@ -149,27 +144,15 @@ public class TestDataNodeMetricsLogger { metricsProvider); startDNForTest(true); assertNotNull(dn); - final PatternMatchingAppender appender = - (PatternMatchingAppender) org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME) - .getAppender("PATTERNMATCHERAPPENDER"); - + LogCapturer logCapturer = + LogCapturer.captureLogs(LoggerFactory.getLogger(DataNode.METRICS_LOG_NAME)); // Ensure that the supplied pattern was matched. - GenericTestUtils.waitFor(new Supplier<Boolean>() { - @Override - public Boolean get() { - return appender.isMatched(); - } - }, 1000, 60000); - + GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains("FakeMetric"), + 1000, 60000); + logCapturer.stopCapturing(); dn.shutdown(); } - private void addAppender(org.apache.log4j.Logger logger, Appender appender) { - @SuppressWarnings("unchecked") - List<Appender> appenders = Collections.list(logger.getAllAppenders()); - ((AsyncAppender) appenders.get(0)).addAppender(appender); - } - public interface TestFakeMetricMXBean { int getFakeMetric(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java index 74c70cec769..82d7a815748 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java @@ -27,7 +27,6 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; -import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; @@ -77,10 +76,9 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.LazyPersistTestCase import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.util.AutoCloseableLock; import org.apache.hadoop.util.Time; -import org.apache.log4j.SimpleLayout; -import org.apache.log4j.WriterAppender; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; @@ -414,12 +412,9 @@ public class TestDirectoryScanner { @Test(timeout=600000) public void testScanDirectoryStructureWarn() throws Exception { + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); //add a logger stream to check what has printed to log
- ByteArrayOutputStream loggerStream = new ByteArrayOutputStream(); - org.apache.log4j.Logger rootLogger = - org.apache.log4j.Logger.getRootLogger(); GenericTestUtils.setRootLogLevel(Level.INFO); - WriterAppender writerAppender = - new WriterAppender(new SimpleLayout(), loggerStream); - rootLogger.addAppender(writerAppender); Configuration conf = getConfiguration(); cluster = new MiniDFSCluster @@ -452,7 +445,7 @@ public class TestDirectoryScanner { scan(1, 1, 0, 1, 0, 0, 0); //ensure the warn log not appear and missing block log do appear - String logContent = new String(loggerStream.toByteArray()); + String logContent = logCapturer.getOutput(); String missingBlockWarn = "Deleted a metadata file" + " for the deleted block"; String dirStructureWarnLog = " found in invalid directory." + @@ -464,6 +457,7 @@ public class TestDirectoryScanner { LOG.info("check pass"); } finally { + logCapturer.stopCapturing(); if (scanner != null) { scanner.shutdown(); scanner = null; @@ -526,7 +520,7 @@ public class TestDirectoryScanner { client = cluster.getFileSystem().getClient(); conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY, 1); // log trace - GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer. + LogCapturer logCapturer = LogCapturer. captureLogs(NameNode.stateChangeLog); // Add files with 5 blocks createFile(GenericTestUtils.getMethodName(), BLOCK_LENGTH * 5, false); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java index 8b1a6c0814c..c7fc71f5375 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; import net.jcip.annotations.NotThreadSafe; + +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; import static org.junit.Assert.assertEquals; @@ -51,7 +53,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.LogVerificationAppender; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.Block; @@ -79,10 +80,10 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.nativeio.NativeIO; import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator; import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.MetricsAsserts; -import org.apache.log4j.Logger; import org.junit.After; import org.junit.AfterClass; import org.junit.Assert; @@ -393,9 +394,7 @@ public class TestFsDatasetCache { } // nth file should hit a capacity exception - final LogVerificationAppender appender = new LogVerificationAppender(); - final Logger logger = Logger.getRootLogger(); - logger.addAppender(appender); + 
LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); setHeartbeatResponse(cacheBlocks(fileLocs[numFiles-1])); GenericTestUtils.waitFor(new Supplier<Boolean>() { @@ -403,11 +402,12 @@ public class TestFsDatasetCache { public Boolean get() { // check the log reported by FsDataSetCache // in the case that cache capacity is exceeded. - int lines = appender.countLinesWithMessage( + int lines = StringUtils.countMatches(logCapturer.getOutput(), "could not reserve more bytes in the cache: "); return lines > 0; } }, 500, 30000); + logCapturer.stopCapturing(); // Also check the metrics for the failure assertTrue("Expected more than 0 failed cache attempts", fsd.getNumBlocksFailedToCache() > 0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java index 073bb532ddf..8f3ef447a6e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.hdfs.server.diskbalancer; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.util.Preconditions; import java.util.function.Supplier; import org.apache.commons.codec.digest.DigestUtils; @@ -321,7 +322,7 @@ public class TestDiskBalancer { 0); DFSTestUtil.waitReplication(fs, filePath, (short) 1); - GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer + LogCapturer logCapturer = LogCapturer .captureLogs(DiskBalancer.LOG); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/PatternMatchingAppender.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/PatternMatchingAppender.java deleted file mode 100644 index f099dfae733..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/PatternMatchingAppender.java +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdfs.server.namenode; - -import java.util.regex.Pattern; - -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.spi.LoggingEvent; - -/** - * An appender that matches logged messages against the given - * regular expression.
- */ -public class PatternMatchingAppender extends AppenderSkeleton { - private final Pattern pattern; - private volatile boolean matched; - - public PatternMatchingAppender() { - this.pattern = Pattern.compile("^.*FakeMetric.*$"); - this.matched = false; - } - - public boolean isMatched() { - return matched; - } - - @Override - protected void append(LoggingEvent event) { - if (pattern.matcher(event.getMessage().toString()).matches()) { - matched = true; - } - } - - @Override - public void close() { - } - - @Override - public boolean requiresLayout() { - return false; - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java index c00649a9db5..617f38a63f1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java @@ -37,7 +37,7 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.authorize.ProxyServers; import org.apache.hadoop.security.authorize.ProxyUsers; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.util.Lists; import org.junit.Before; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java index d34d6ca7379..fec16c13fd9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java @@ -41,7 +41,7 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import java.io.IOException; import java.security.PrivilegedExceptionAction; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java index 0f736696751..953d1ef7c02 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java @@ -24,7 +24,6 @@ import java.io.InputStream; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.List; import java.util.regex.Pattern; import org.apache.hadoop.conf.Configuration; @@ -39,12 +38,9 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.web.WebHdfsConstants; import org.apache.hadoop.hdfs.web.WebHdfsTestUtil; import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; -import 
org.apache.hadoop.test.GenericTestUtils.LogCapturer; -import org.apache.log4j.Appender; -import org.apache.log4j.AsyncAppender; -import org.apache.log4j.Logger; import org.junit.After; import org.junit.AfterClass; @@ -107,6 +103,7 @@ public class TestAuditLogs { UserGroupInformation userGroupInfo; @Before + @SuppressWarnings("unchecked") public void setupCluster() throws Exception { // must configure prior to instantiating the namesystem because it // will reconfigure the logger if async is enabled @@ -122,11 +119,9 @@ public class TestAuditLogs { util.createFiles(fs, fileName); // make sure the appender is what it's supposed to be - Logger logger = org.apache.log4j.Logger.getLogger( - "org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit"); - @SuppressWarnings("unchecked") - List appenders = Collections.list(logger.getAllAppenders()); - assertTrue(appenders.get(0) instanceof AsyncAppender); + assertTrue(Collections.list(org.apache.log4j.Logger.getLogger( + "org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit").getAllAppenders()) + .get(0) instanceof org.apache.log4j.AsyncAppender); fnames = util.getFileNames(fileName); util.waitReplication(fs, fileName, (short)3); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java index d675dcda988..ccc6be33c9c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java @@ -82,7 +82,7 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.ExitUtil.ExitException; @@ -863,7 +863,7 @@ public class TestCheckpoint { savedSd = sd; } - LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs( + LogCapturer logs = LogCapturer.captureLogs( LoggerFactory.getLogger(Storage.class)); try { // try to lock the storage that's already locked diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java index 771caefd20a..73aee349da1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java @@ -49,7 +49,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java index 17803a07869..c68ad185707 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java @@ -83,6 +83,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException; import org.apache.hadoop.hdfs.util.XMLUtils.Stanza; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.ExitUtil; @@ -90,9 +91,6 @@ import org.apache.hadoop.util.ExitUtil.ExitException; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.LogManager; -import org.apache.log4j.spi.LoggingEvent; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @@ -1717,36 +1715,13 @@ public class TestEditLog { } } - class TestAppender extends AppenderSkeleton { - private final List log = new ArrayList<>(); - - @Override - public boolean requiresLayout() { - return false; - } - - @Override - protected void append(final LoggingEvent loggingEvent) { - log.add(loggingEvent); - } - - @Override - public void close() { - } - - public List getLog() { - return new ArrayList<>(log); - } - } - /** * * @throws Exception */ @Test public void testReadActivelyUpdatedLog() throws Exception { - final TestAppender appender = new TestAppender(); - LogManager.getRootLogger().addAppender(appender); + final LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); Configuration conf = new HdfsConfiguration(); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true); // Set single handler thread, so all transactions hit same thread-local ops. 
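The TestEditLog hunk above is representative of the whole migration: a hand-rolled log4j AppenderSkeleton is swapped for the new org.apache.hadoop.logging.LogCapturer, always in a capture/assert/stop shape. A minimal sketch of that pattern, assuming only the LogCapturer calls visible in these hunks (captureLogs, getOutput, stopCapturing); the logged message here is illustrative, not from the patch:

import org.apache.hadoop.logging.LogCapturer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogCapturerPatternSketch {
  public static void main(String[] args) {
    // captureLogs() hooks an in-memory buffer onto the given logger;
    // LoggerFactory.getLogger("root") mirrors the root-logger captures above.
    LogCapturer capturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
    Logger log = LoggerFactory.getLogger(LogCapturerPatternSketch.class);
    try {
      log.warn("Caught exception after reading 7 ops"); // illustrative line
      // getOutput() returns everything logged since the capture started.
      boolean seen = capturer.getOutput().contains("Caught exception after reading");
      System.out.println("captured: " + seen);
    } finally {
      capturer.stopCapturing(); // detach so later tests see a clean logger
    }
  }
}
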
@@ -1794,21 +1769,16 @@ public class TestEditLog { rwf.close(); events.poll(); - String pattern = "Caught exception after reading (.*) ops"; - Pattern r = Pattern.compile(pattern); - final List log = appender.getLog(); - for (LoggingEvent event : log) { - Matcher m = r.matcher(event.getRenderedMessage()); - if (m.find()) { + for (String logLine : logCapturer.getOutput().split("\n")) { + if (logLine != null && logLine.contains("Caught exception after reading")) { fail("Should not try to read past latest syned edit log op"); } } - } finally { if (cluster != null) { cluster.shutdown(); } - LogManager.getRootLogger().removeAppender(appender); + logCapturer.stopCapturing(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java index 3b15c2db7a9..fb484cd3ea0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java @@ -26,6 +26,8 @@ import java.io.IOException; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; + import org.junit.Assert; import org.junit.Test; @@ -118,8 +120,8 @@ public class TestEditsDoubleBuffer { op3.setTransactionId(3); buffer.writeOp(op3, fakeLogVersion); - GenericTestUtils.LogCapturer logs = - GenericTestUtils.LogCapturer.captureLogs(EditsDoubleBuffer.LOG); + LogCapturer logs = + LogCapturer.captureLogs(EditsDoubleBuffer.LOG); try { buffer.close(); fail(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java index 89193ca6633..860e6b0b256 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java @@ -64,7 +64,7 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.erasurecode.ECSchema; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.FakeTimer; import org.slf4j.event.Level; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java index f0ae1810167..afb049156e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java @@ -25,7 +25,7 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.lib.MetricsRegistry; import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import 
org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.MetricsAsserts; import org.apache.hadoop.util.FakeTimer; import org.apache.hadoop.util.Time; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java index 9c77f9d92b8..08c9240f26b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java @@ -29,6 +29,8 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; + import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -58,7 +60,7 @@ public class TestFSNamesystemLockReport { private MiniDFSCluster cluster; private FileSystem fs; private UserGroupInformation userGroupInfo; - private GenericTestUtils.LogCapturer logs; + private LogCapturer logs; @Before public void setUp() throws Exception { @@ -76,7 +78,7 @@ public class TestFSNamesystemLockReport { userGroupInfo = UserGroupInformation.createUserForTesting("bob", new String[] {"hadoop"}); - logs = GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.LOG); + logs = LogCapturer.captureLogs(FSNamesystem.LOG); GenericTestUtils .setLogLevel(LoggerFactory.getLogger(FSNamesystem.class.getName()), org.slf4j.event.Level.INFO); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index a312b03168b..96650a4d5ee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -115,7 +115,7 @@ import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.util.ToolRunner; import org.junit.After; import org.junit.AfterClass; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java index 464fdfcd6c4..651d4f31c9d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java @@ -18,15 +18,13 @@ package org.apache.hadoop.hdfs.server.namenode; -import java.util.function.Supplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.log4j.Appender; 
-import org.apache.log4j.AsyncAppender; import org.junit.Rule; import org.junit.Test; @@ -34,7 +32,6 @@ import org.junit.rules.Timeout; import java.io.IOException; import java.util.Collections; -import java.util.List; import java.util.concurrent.TimeoutException; import static org.apache.hadoop.hdfs.DFSConfigKeys.*; @@ -64,12 +61,12 @@ public class TestNameNodeMetricsLogger { } @Test + @SuppressWarnings("unchecked") public void testMetricsLoggerIsAsync() throws IOException { makeNameNode(true); org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(NameNode.METRICS_LOG_NAME); - @SuppressWarnings("unchecked") - List appenders = Collections.list(logger.getAllAppenders()); - assertTrue(appenders.get(0) instanceof AsyncAppender); + assertTrue(Collections.list(logger.getAllAppenders()).get(0) + instanceof org.apache.log4j.AsyncAppender); } /** @@ -80,20 +77,14 @@ public class TestNameNodeMetricsLogger { public void testMetricsLogOutput() throws IOException, InterruptedException, TimeoutException { TestFakeMetric metricsProvider = new TestFakeMetric(); - MBeans.register(this.getClass().getSimpleName(), - "DummyMetrics", metricsProvider); + MBeans.register(this.getClass().getSimpleName(), "DummyMetrics", metricsProvider); makeNameNode(true); // Log metrics early and often. - final PatternMatchingAppender appender = - (PatternMatchingAppender) org.apache.log4j.Logger.getLogger(NameNode.METRICS_LOG_NAME) - .getAppender("PATTERNMATCHERAPPENDER"); + LogCapturer logCapturer = + LogCapturer.captureLogs(LoggerFactory.getLogger(NameNode.METRICS_LOG_NAME)); - // Ensure that the supplied pattern was matched. - GenericTestUtils.waitFor(new Supplier() { - @Override - public Boolean get() { - return appender.isMatched(); - } - }, 1000, 60000); + GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains("FakeMetric"), + 1000, 60000); + logCapturer.stopCapturing(); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java index 073ee377819..8750154077f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java @@ -28,7 +28,8 @@ import java.util.Collection; import org.junit.Test; import org.slf4j.LoggerFactory; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; + +import org.apache.hadoop.logging.LogCapturer; public class TestNameNodeResourcePolicy { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java index 67c8f3c18f1..7ea0b24f2be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java @@ -52,7 +52,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.LogVerificationAppender; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.StripedFileTestUtil; import 
org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -69,12 +68,12 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.util.HostsFileWriter; import org.apache.hadoop.hdfs.util.MD5FileUtils; import org.apache.hadoop.io.MD5Hash; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.ExitUtil.ExitException; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.StringUtils; -import org.apache.log4j.Logger; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -524,10 +523,8 @@ public class TestStartup { // Corrupt the md5 files in all the namedirs corruptFSImageMD5(true); - // Attach our own log appender so we can verify output - final LogVerificationAppender appender = new LogVerificationAppender(); - final Logger logger = Logger.getRootLogger(); - logger.addAppender(appender); + // Capture the logs so we can verify the output + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); // Try to start a new cluster LOG.info("\n===========================================\n" + @@ -541,10 +538,13 @@ } catch (IOException ioe) { GenericTestUtils.assertExceptionContains( "Failed to load FSImage file", ioe); - int md5failures = appender.countExceptionsWithMessage( - " is corrupt with MD5 checksum of "); + + int md5failures = + org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(), + " is corrupt with MD5 checksum of "); // Two namedirs, so should have seen two failures assertEquals(2, md5failures); + logCapturer.stopCapturing(); } } finally { if (cluster != null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java index 0e83bec11f3..7376237a4c1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java @@ -43,7 +43,7 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorage; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -197,7 +197,7 @@ public class TestBootstrapStandby { // Trying to bootstrap standby should now fail since the edit // logs aren't available in the shared dir.
- LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs( + LogCapturer logs = LogCapturer.captureLogs( LoggerFactory.getLogger(BootstrapStandby.class)); try { assertEquals(BootstrapStandby.ERR_CODE_LOGS_UNAVAILABLE, forceBootstrap(1)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java index 168273117b5..6fa979d039a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java @@ -44,6 +44,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.Whitebox; import org.junit.After; import org.junit.Before; @@ -143,7 +144,7 @@ public class TestDelegationTokensWithHA { () -> (DistributedFileSystem) FileSystem.get(conf)); GenericTestUtils.setLogLevel(ObserverReadProxyProvider.LOG, Level.DEBUG); - GenericTestUtils.LogCapturer logCapture = GenericTestUtils.LogCapturer + LogCapturer logCapture = LogCapturer .captureLogs(ObserverReadProxyProvider.LOG); try { dfs.access(new Path("/"), FsAction.READ); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java index 513f60cb1ed..3dbadcaaf08 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java @@ -37,7 +37,6 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; -import org.apache.hadoop.hdfs.LogVerificationAppender; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.server.common.Util; @@ -48,12 +47,12 @@ import org.apache.hadoop.io.compress.CompressionCodecFactory; import org.apache.hadoop.io.compress.CompressionOutputStream; import org.apache.hadoop.io.compress.GzipCodec; import org.apache.hadoop.ipc.StandbyException; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.ThreadUtil; -import org.apache.log4j.spi.LoggingEvent; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -299,39 +298,38 @@ public class TestStandbyCheckpoints { @Test(timeout = 30000) public void testCheckpointBeforeNameNodeInitializationIsComplete() throws Exception { - final LogVerificationAppender appender = new LogVerificationAppender(); - final org.apache.log4j.Logger logger = org.apache.log4j.Logger - .getRootLogger(); - logger.addAppender(appender); + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); - // Transition 2 to 
observer - cluster.transitionToObserver(2); - doEdits(0, 10); - // After a rollEditLog, Standby(nn1)'s next checkpoint would be - // ahead of observer(nn2). - nns[0].getRpcServer().rollEditLog(); + try { + // Transition 2 to observer + cluster.transitionToObserver(2); + doEdits(0, 10); + // After a rollEditLog, Standby(nn1)'s next checkpoint would be + // ahead of observer(nn2). + nns[0].getRpcServer().rollEditLog(); - NameNode nn2 = nns[2]; - FSImage nnFSImage = NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, null); + NameNode nn2 = nns[2]; + FSImage nnFSImage = NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, null); - // After standby creating a checkpoint, it will try to push the image to - // active and all observer, updating it's own txid to the most recent. - HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12)); - HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12)); + // After the standby creates a checkpoint, it will try to push the image to + // the active and all observers, updating its own txid to the most recent. + HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12)); + HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12)); - NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, nnFSImage); - cluster.transitionToStandby(2); - logger.removeAppender(appender); + NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, nnFSImage); + cluster.transitionToStandby(2); - for (LoggingEvent event : appender.getLog()) { - String message = event.getRenderedMessage(); - if (message.contains("PutImage failed") && - message.contains("FSImage has not been set in the NameNode.")) { - //Logs have the expected exception. - return; + for (String logLine : logCapturer.getOutput().split("\n")) { + if (logLine != null && logLine.contains("PutImage failed") && logLine.contains( + "FSImage has not been set in the NameNode.")) { + //Logs have the expected exception.
+ return; + } } + fail("Expected exception not present in logs."); + } finally { + logCapturer.stopCapturing(); } - fail("Expected exception not present in logs."); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java index 58d72f14d73..3741bbf015f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java @@ -93,7 +93,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.util.KerberosName; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.ExitUtil; import org.junit.After; @@ -1372,7 +1372,7 @@ public class TestExternalStoragePolicySatisfier { Path filePath = new Path("/zeroSizeFile"); DFSTestUtil.createFile(fs, filePath, 1024, (short) 5, 0); fs.setReplication(filePath, (short) 3); - LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs( + LogCapturer logs = LogCapturer.captureLogs( LoggerFactory.getLogger(BlockStorageMovementAttemptedItems.class)); fs.setStoragePolicy(filePath, "COLD"); fs.satisfyStoragePolicy(filePath); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties index 368deef4020..b739b25f352 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties @@ -22,9 +22,6 @@ log4j.appender.stdout=org.apache.log4j.ConsoleAppender log4j.appender.stdout.layout=org.apache.log4j.PatternLayout log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n -# Only to be used for testing -log4j.appender.PATTERNMATCHERAPPENDER=org.apache.hadoop.hdfs.server.namenode.PatternMatchingAppender - # # NameNode metrics logging. # The default is to retain two namenode-metrics.log files up to 64MB each. 
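The log4j.properties hunks below drop the test-only PATTERNMATCHERAPPENDER and repoint the async rolling-file appenders at org.apache.hadoop.logging.appenders.AsyncRFAAppender. The deleted appender's one consumer, TestNameNodeMetricsLogger above, now polls captured output instead. A sketch of that polling idiom; the NameNodeMetricsLog logger name, the "FakeMetric" marker, and the 1s/60s bounds are taken from these hunks, while the wrapper class is assumed:

import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils;
import org.slf4j.LoggerFactory;

public class MetricsLogPollingSketch {
  static void waitForFakeMetric() throws Exception {
    // NameNodeMetricsLog is the logger the ASYNCNNMETRICSRFA appender serves;
    // "FakeMetric" is the marker the deleted PatternMatchingAppender watched for.
    LogCapturer capturer =
        LogCapturer.captureLogs(LoggerFactory.getLogger("NameNodeMetricsLog"));
    try {
      // Re-check the captured output every second, time out after 60 seconds.
      GenericTestUtils.waitFor(() -> capturer.getOutput().contains("FakeMetric"),
          1000, 60000);
    } finally {
      capturer.stopCapturing();
    }
  }
}
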
@@ -32,10 +29,10 @@ log4j.appender.PATTERNMATCHERAPPENDER=org.apache.hadoop.hdfs.server.namenode.Pat # TODO : While migrating to log4j2, replace AsyncRFAAppender with AsyncAppender as # log4j2 properties support wrapping of other appenders to AsyncAppender using appender ref -namenode.metrics.logger=INFO,ASYNCNNMETRICSRFA,PATTERNMATCHERAPPENDER +namenode.metrics.logger=INFO,ASYNCNNMETRICSRFA log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger} log4j.additivity.NameNodeMetricsLog=false -log4j.appender.ASYNCNNMETRICSRFA=org.apache.hadoop.hdfs.util.AsyncRFAAppender +log4j.appender.ASYNCNNMETRICSRFA=org.apache.hadoop.logging.appenders.AsyncRFAAppender log4j.appender.ASYNCNNMETRICSRFA.conversionPattern=%d{ISO8601} %m%n log4j.appender.ASYNCNNMETRICSRFA.maxFileSize=64MB log4j.appender.ASYNCNNMETRICSRFA.fileName=${hadoop.log.dir}/namenode-metrics.log @@ -48,10 +45,10 @@ log4j.appender.ASYNCNNMETRICSRFA.maxBackupIndex=1 # TODO : While migrating to log4j2, replace AsyncRFAAppender with AsyncAppender as # log4j2 properties support wrapping of other appenders to AsyncAppender using appender ref -datanode.metrics.logger=INFO,ASYNCDNMETRICSRFA,PATTERNMATCHERAPPENDER +datanode.metrics.logger=INFO,ASYNCDNMETRICSRFA log4j.logger.DataNodeMetricsLog=${datanode.metrics.logger} log4j.additivity.DataNodeMetricsLog=false -log4j.appender.ASYNCDNMETRICSRFA=org.apache.hadoop.hdfs.util.AsyncRFAAppender +log4j.appender.ASYNCDNMETRICSRFA=org.apache.hadoop.logging.appenders.AsyncRFAAppender log4j.appender.ASYNCDNMETRICSRFA.conversionPattern=%d{ISO8601} %m%n log4j.appender.ASYNCDNMETRICSRFA.maxFileSize=64MB log4j.appender.ASYNCDNMETRICSRFA.fileName=${hadoop.log.dir}/datanode-metrics.log @@ -72,7 +69,7 @@ hdfs.audit.log.maxfilesize=256MB hdfs.audit.log.maxbackupindex=20 log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger} log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false -log4j.appender.ASYNCAUDITAPPENDER=org.apache.hadoop.hdfs.util.AsyncRFAAppender +log4j.appender.ASYNCAUDITAPPENDER=org.apache.hadoop.logging.appenders.AsyncRFAAppender log4j.appender.ASYNCAUDITAPPENDER.blocking=false log4j.appender.ASYNCAUDITAPPENDER.bufferSize=256 log4j.appender.ASYNCAUDITAPPENDER.conversionPattern=%m%n diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml index e3b3511c0ce..142c1ab31d1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml @@ -124,6 +124,12 @@ <artifactId>assertj-core</artifactId> <scope>test</scope> </dependency> + <dependency> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-logging</artifactId> + <scope>test</scope> + <type>test-jar</type> + </dependency> diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java index 15682eeefc6..cb5f3edd054 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java @@ -36,9 +36,10 @@ import java.util.HashMap; import java.util.Iterator; import
java.util.List; import java.util.Map; -import java.util.concurrent.CopyOnWriteArrayList; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; + +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptFailEvent; import org.apache.hadoop.yarn.util.resource.CustomResourceTypesConfigurationProvider; import org.junit.After; @@ -107,12 +108,10 @@ import org.apache.hadoop.yarn.util.Clock; import org.apache.hadoop.yarn.util.ControlledClock; import org.apache.hadoop.yarn.util.SystemClock; import org.apache.hadoop.yarn.util.resource.ResourceUtils; -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.apache.log4j.spi.LoggingEvent; import org.junit.Test; import org.mockito.ArgumentCaptor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; @@ -128,29 +127,6 @@ public class TestTaskAttempt{ } } - private static class TestAppender extends AppenderSkeleton { - - private final List logEvents = new CopyOnWriteArrayList<>(); - - @Override - public boolean requiresLayout() { - return false; - } - - @Override - public void close() { - } - - @Override - protected void append(LoggingEvent arg0) { - logEvents.add(arg0); - } - - private List getLogEvents() { - return logEvents; - } - } - @BeforeClass public static void setupBeforeClass() { ResourceUtils.resetResourceTypes(new Configuration()); @@ -1724,11 +1700,10 @@ public class TestTaskAttempt{ for (String memoryName : ImmutableList.of( MRJobConfig.RESOURCE_TYPE_NAME_MEMORY, MRJobConfig.RESOURCE_TYPE_ALTERNATIVE_NAME_MEMORY)) { - TestAppender testAppender = new TestAppender(); - final Logger logger = Logger.getLogger(TaskAttemptImpl.class); + final Logger logger = LoggerFactory.getLogger(TaskAttemptImpl.class); + LogCapturer logCapturer = LogCapturer.captureLogs(logger); try { TaskAttemptImpl.RESOURCE_REQUEST_CACHE.clear(); - logger.addAppender(testAppender); EventHandler eventHandler = mock(EventHandler.class); Clock clock = SystemClock.getInstance(); JobConf jobConf = new JobConf(); @@ -1741,13 +1716,11 @@ public class TestTaskAttempt{ getResourceInfoFromContainerRequest(taImpl, eventHandler). getMemorySize(); assertEquals(3072, memorySize); - assertTrue(testAppender.getLogEvents().stream() - .anyMatch(e -> e.getLevel() == Level.WARN && ("Configuration " + - "mapreduce.reduce.resource." + memoryName + "=3Gi is " + - "overriding the mapreduce.reduce.memory.mb=2048 configuration") - .equals(e.getMessage()))); + assertTrue(logCapturer.getOutput().contains( + "Configuration " + "mapreduce.reduce.resource." + memoryName + "=3Gi is " + + "overriding the mapreduce.reduce.memory.mb=2048 configuration")); } finally { - logger.removeAppender(testAppender); + logCapturer.stopCapturing(); } } } @@ -1809,10 +1782,9 @@ public class TestTaskAttempt{ @Test public void testReducerCpuRequestOverriding() { - TestAppender testAppender = new TestAppender(); - final Logger logger = Logger.getLogger(TaskAttemptImpl.class); + final Logger logger = LoggerFactory.getLogger(TaskAttemptImpl.class); + final LogCapturer logCapturer = LogCapturer.captureLogs(logger); try { - logger.addAppender(testAppender); EventHandler eventHandler = mock(EventHandler.class); Clock clock = SystemClock.getInstance(); JobConf jobConf = new JobConf(); @@ -1825,13 +1797,11 @@ public class TestTaskAttempt{ getResourceInfoFromContainerRequest(taImpl, eventHandler). 
getVirtualCores(); assertEquals(7, vCores); - assertTrue(testAppender.getLogEvents().stream().anyMatch( - e -> e.getLevel() == Level.WARN && ("Configuration " + - "mapreduce.reduce.resource.vcores=7 is overriding the " + - "mapreduce.reduce.cpu.vcores=9 configuration").equals( - e.getMessage()))); + assertTrue(logCapturer.getOutput().contains( + "Configuration " + "mapreduce.reduce.resource.vcores=7 is overriding the " + + "mapreduce.reduce.cpu.vcores=9 configuration")); } finally { - logger.removeAppender(testAppender); + logCapturer.stopCapturing(); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml index 7530428d752..d124c97e9da 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml @@ -72,6 +72,12 @@ <artifactId>assertj-core</artifactId> <scope>test</scope> </dependency> + <dependency> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-logging</artifactId> + <scope>test</scope> + <type>test-jar</type> + </dependency> diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java index a0223dedd64..43ab1701601 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java @@ -23,12 +23,10 @@ import java.io.BufferedReader; import java.io.DataOutputStream; import java.io.File; import java.io.FileInputStream; -import java.io.Flushable; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.util.ArrayList; -import java.util.Enumeration; import java.util.List; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -44,16 +42,13 @@ import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.SecureIOUtils; +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.mapreduce.JobID; import org.apache.hadoop.mapreduce.util.ProcessTree; import org.apache.hadoop.util.Shell; -import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.concurrent.HadoopExecutors; import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.log4j.Appender; -import org.apache.log4j.LogManager; -import org.apache.log4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; @@ -276,42 +271,7 @@ public class TaskLog { } // flush & close all appenders - LogManager.shutdown(); - } - - @SuppressWarnings("unchecked") - public static synchronized void syncLogs() { - // flush standard streams - // - System.out.flush(); - System.err.flush(); - - // flush flushable appenders - // - final Logger rootLogger = Logger.getRootLogger(); - flushAppenders(rootLogger); - final Enumeration<Logger> allLoggers = rootLogger.getLoggerRepository().
- getCurrentLoggers(); - while (allLoggers.hasMoreElements()) { - final Logger l = allLoggers.nextElement(); - flushAppenders(l); - } - } - - @SuppressWarnings("unchecked") - private static void flushAppenders(Logger l) { - final Enumeration allAppenders = l.getAllAppenders(); - while (allAppenders.hasMoreElements()) { - final Appender a = allAppenders.nextElement(); - if (a instanceof Flushable) { - try { - ((Flushable) a).flush(); - } catch (IOException ioe) { - System.err.println(a + ": Failed to flush!" - + StringUtils.stringifyException(ioe)); - } - } - } + HadoopLoggerUtils.shutdownLogManager(); } public static ScheduledExecutorService createLogSyncer() { @@ -336,7 +296,7 @@ public class TaskLog { new Runnable() { @Override public void run() { - TaskLog.syncLogs(); + HadoopLoggerUtils.syncLogs(); } }, 0L, 5L, TimeUnit.SECONDS); return scheduler; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java index e91b4c1e854..f83835f5383 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java @@ -28,24 +28,19 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; -import java.io.ByteArrayOutputStream; import java.io.IOException; -import java.io.LineNumberReader; -import java.io.StringReader; import org.junit.Before; import org.junit.Test; import static org.junit.Assert.*; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.mapred.TaskReport; import org.apache.hadoop.mapreduce.JobStatus.State; import org.apache.hadoop.mapreduce.protocol.ClientProtocol; -import org.apache.log4j.Layout; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.apache.log4j.WriterAppender; import org.mockito.stubbing.Answer; +import org.slf4j.LoggerFactory; /** * Test to make sure that command line output for @@ -73,55 +68,53 @@ public class TestJobMonitorAndPrint { @Test public void testJobMonitorAndPrint() throws Exception { - JobStatus jobStatus_1 = new JobStatus(new JobID("job_000", 1), 1f, 0.1f, - 0.1f, 0f, State.RUNNING, JobPriority.HIGH, "tmp-user", "tmp-jobname", - "tmp-queue", "tmp-jobfile", "tmp-url", true); - JobStatus jobStatus_2 = new JobStatus(new JobID("job_000", 1), 1f, 1f, - 1f, 1f, State.SUCCEEDED, JobPriority.HIGH, "tmp-user", "tmp-jobname", - "tmp-queue", "tmp-jobfile", "tmp-url", true); + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger(Job.class)); + try { + JobStatus jobStatus_1 = + new JobStatus(new JobID("job_000", 1), 1f, 0.1f, 0.1f, 0f, State.RUNNING, + JobPriority.HIGH, "tmp-user", "tmp-jobname", "tmp-queue", "tmp-jobfile", "tmp-url", + true); + JobStatus jobStatus_2 = + new JobStatus(new JobID("job_000", 1), 1f, 1f, 1f, 1f, State.SUCCEEDED, JobPriority.HIGH, + "tmp-user", "tmp-jobname", "tmp-queue", "tmp-jobfile", "tmp-url", true); - doAnswer((Answer) invocation -> - TaskCompletionEvent.EMPTY_ARRAY).when(job) - .getTaskCompletionEvents(anyInt(), anyInt()); + doAnswer((Answer) invocation -> 
TaskCompletionEvent.EMPTY_ARRAY).when( + job).getTaskCompletionEvents(anyInt(), anyInt()); - doReturn(new TaskReport[5]).when(job).getTaskReports(isA(TaskType.class)); - when(clientProtocol.getJobStatus(any(JobID.class))).thenReturn(jobStatus_1, jobStatus_2); - // setup the logger to capture all logs - Layout layout = - Logger.getRootLogger().getAppender("stdout").getLayout(); - ByteArrayOutputStream os = new ByteArrayOutputStream(); - WriterAppender appender = new WriterAppender(layout, os); - appender.setThreshold(Level.ALL); - Logger qlogger = Logger.getLogger(Job.class); - qlogger.addAppender(appender); + doReturn(new TaskReport[5]).when(job).getTaskReports(isA(TaskType.class)); + when(clientProtocol.getJobStatus(any(JobID.class))).thenReturn(jobStatus_1, jobStatus_2); - job.monitorAndPrintJob(); + job.monitorAndPrintJob(); - qlogger.removeAppender(appender); - LineNumberReader r = new LineNumberReader(new StringReader(os.toString())); - String line; - boolean foundHundred = false; - boolean foundComplete = false; - boolean foundUber = false; - String uberModeMatch = "uber mode : true"; - String progressMatch = "map 100% reduce 100%"; - String completionMatch = "completed successfully"; - while ((line = r.readLine()) != null) { - if (line.contains(uberModeMatch)) { - foundUber = true; + boolean foundHundred = false; + boolean foundComplete = false; + boolean foundUber = false; + String uberModeMatch = "uber mode : true"; + String progressMatch = "map 100% reduce 100%"; + String completionMatch = "completed successfully"; + for (String logLine : logCapturer.getOutput().split("\n")) { + if (logLine.contains(uberModeMatch)) { + foundUber = true; + } + if (logLine.contains(progressMatch)) { + foundHundred = true; + } + if (logLine.contains(completionMatch)) { + foundComplete = true; + } + if (foundUber && foundHundred && foundComplete) { + break; + } } - foundHundred = line.contains(progressMatch); - if (foundHundred) - break; - } - line = r.readLine(); - foundComplete = line.contains(completionMatch); - assertTrue(foundUber); - assertTrue(foundHundred); - assertTrue(foundComplete); + assertTrue(foundUber); + assertTrue(foundHundred); + assertTrue(foundComplete); - System.out.println("The output of job.toString() is : \n" + job.toString()); - assertTrue(job.toString().contains("Number of maps: 5\n")); - assertTrue(job.toString().contains("Number of reduces: 5\n")); + System.out.println("The output of job.toString() is : \n" + job.toString()); + assertTrue(job.toString().contains("Number of maps: 5\n")); + assertTrue(job.toString().contains("Number of reduces: 5\n")); + } finally { + logCapturer.stopCapturing(); + } } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml index 17358a37da3..632e972d5ab 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml @@ -128,6 +128,12 @@ <artifactId>assertj-core</artifactId> <scope>test</scope> </dependency> + <dependency> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-logging</artifactId> + <scope>test</scope> + <type>test-jar</type> + </dependency> diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java index 0bdc7212179..063f185d3d7 100644 ---
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java @@ -34,7 +34,6 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; @@ -45,7 +44,6 @@ import java.security.PrivilegedExceptionAction; import java.util.Arrays; import java.util.List; import java.util.Map; -import java.util.concurrent.CopyOnWriteArrayList; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.conf.Configuration; @@ -55,6 +53,7 @@ import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.Text; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.mapreduce.JobID; import org.apache.hadoop.mapreduce.JobPriority; import org.apache.hadoop.mapreduce.JobStatus.State; @@ -110,13 +109,6 @@ import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.resource.CustomResourceTypesConfigurationProvider; import org.apache.hadoop.yarn.util.resource.ResourceUtils; -import org.apache.log4j.Appender; -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.Layout; -import org.apache.log4j.Level; -import org.apache.log4j.SimpleLayout; -import org.apache.log4j.WriterAppender; -import org.apache.log4j.spi.LoggingEvent; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -144,29 +136,6 @@ public class TestYARNRunner { MRJobConfig.DEFAULT_TASK_PROFILE_PARAMS.lastIndexOf("%")); private static final String CUSTOM_RESOURCE_NAME = "a-custom-resource"; - private static class TestAppender extends AppenderSkeleton { - - private final List logEvents = new CopyOnWriteArrayList<>(); - - @Override - public boolean requiresLayout() { - return false; - } - - @Override - public void close() { - } - - @Override - protected void append(LoggingEvent arg0) { - logEvents.add(arg0); - } - - private List getLogEvents() { - return logEvents; - } - } - private YARNRunner yarnRunner; private ResourceMgrDelegate resourceMgrDelegate; private YarnConfiguration conf; @@ -549,38 +518,48 @@ public class TestYARNRunner { assertTrue("AM admin command opts is after user command opts.", adminIndex < userIndex); } } + @Test(timeout=20000) public void testWarnCommandOpts() throws Exception { - org.apache.log4j.Logger logger = - org.apache.log4j.Logger.getLogger(YARNRunner.class); - - ByteArrayOutputStream bout = new ByteArrayOutputStream(); - Layout layout = new SimpleLayout(); - Appender appender = new WriterAppender(layout, bout); - logger.addAppender(appender); - - JobConf jobConf = new JobConf(); - - jobConf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS, "-Djava.net.preferIPv4Stack=true -Djava.library.path=foo"); - jobConf.set(MRJobConfig.MR_AM_COMMAND_OPTS, "-Xmx1024m -Djava.library.path=bar"); - - YARNRunner yarnRunner = new YARNRunner(jobConf); - - @SuppressWarnings("unused") - ApplicationSubmissionContext submissionContext = - buildSubmitContext(yarnRunner, jobConf); - - String logMsg = bout.toString(); - assertTrue(logMsg.contains("WARN - Usage of -Djava.library.path in " + - 
"yarn.app.mapreduce.am.admin-command-opts can cause programs to no " + - "longer function if hadoop native libraries are used. These values " + - "should be set as part of the LD_LIBRARY_PATH in the app master JVM " + - "env using yarn.app.mapreduce.am.admin.user.env config settings.")); - assertTrue(logMsg.contains("WARN - Usage of -Djava.library.path in " + - "yarn.app.mapreduce.am.command-opts can cause programs to no longer " + - "function if hadoop native libraries are used. These values should " + - "be set as part of the LD_LIBRARY_PATH in the app master JVM env " + - "using yarn.app.mapreduce.am.env config settings.")); + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger(YARNRunner.class)); + try { + JobConf jobConf = new JobConf(); + + jobConf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS, + "-Djava.net.preferIPv4Stack=true -Djava.library.path=foo"); + jobConf.set(MRJobConfig.MR_AM_COMMAND_OPTS, "-Xmx1024m -Djava.library.path=bar"); + + YARNRunner yarnRunner = new YARNRunner(jobConf); + + @SuppressWarnings("unused") + ApplicationSubmissionContext submissionContext = buildSubmitContext(yarnRunner, jobConf); + + boolean isFoundOne = false; + boolean isFoundTwo = false; + for (String logLine : logCapturer.getOutput().split("\n")) { + if (logLine == null) { + continue; + } + if (logLine.contains("WARN") && logLine.contains("Usage of -Djava.library.path in " + + "yarn.app.mapreduce.am.admin-command-opts can cause programs to no " + + "longer function if hadoop native libraries are used. These values " + + "should be set as part of the LD_LIBRARY_PATH in the app master JVM " + + "env using yarn.app.mapreduce.am.admin.user.env config settings.")) { + isFoundOne = true; + } + if (logLine.contains("WARN") && logLine.contains("Usage of -Djava.library.path in " + + "yarn.app.mapreduce.am.command-opts can cause programs to no longer " + + "function if hadoop native libraries are used. These values should " + + "be set as part of the LD_LIBRARY_PATH in the app master JVM env " + + "using yarn.app.mapreduce.am.env config settings.")) { + isFoundTwo = true; + } + } + assertTrue(isFoundOne); + assertTrue(isFoundTwo); + } finally { + logCapturer.stopCapturing(); + } } @Test(timeout=20000) @@ -996,10 +975,7 @@ public class TestYARNRunner { for (String memoryName : ImmutableList.of( MRJobConfig.RESOURCE_TYPE_NAME_MEMORY, MRJobConfig.RESOURCE_TYPE_ALTERNATIVE_NAME_MEMORY)) { - TestAppender testAppender = new TestAppender(); - org.apache.log4j.Logger logger = - org.apache.log4j.Logger.getLogger(YARNRunner.class); - logger.addAppender(testAppender); + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger(YARNRunner.class)); try { JobConf jobConf = new JobConf(); jobConf.set(MRJobConfig.MR_AM_RESOURCE_PREFIX + memoryName, "3 Gi"); @@ -1017,13 +993,17 @@ public class TestYARNRunner { long memorySize = resourceRequest.getCapability().getMemorySize(); Assert.assertEquals(3072, memorySize); - assertTrue(testAppender.getLogEvents().stream().anyMatch( - e -> e.getLevel() == Level.WARN && ("Configuration " + - "yarn.app.mapreduce.am.resource." + memoryName + "=3Gi is " + - "overriding the yarn.app.mapreduce.am.resource.mb=2048 " + - "configuration").equals(e.getMessage()))); + boolean isLogFound = false; + for (String logLine : logCapturer.getOutput().split("\n")) { + if (logLine != null && logLine.contains("WARN") && logLine.contains( + "Configuration " + "yarn.app.mapreduce.am.resource." 
+ memoryName + "=3Gi is " + + "overriding the yarn.app.mapreduce.am.resource.mb=2048 " + "configuration")) { + isLogFound = true; + } + } + assertTrue("Log line could not be found", isLogFound); } finally { - logger.removeAppender(testAppender); + logCapturer.stopCapturing(); } } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestChild.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestChild.java index 338f1172b04..cc93e5629d1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestChild.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestChild.java @@ -29,8 +29,6 @@ import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapred.HadoopTestCase; import org.apache.hadoop.mapred.JobConf; -import org.apache.log4j.Level; -import org.junit.Before; import org.junit.Test; import static org.junit.Assert.assertTrue; @@ -76,12 +74,10 @@ public class TestChild extends HadoopTestCase { mapJavaOpts, mapJavaOpts, MAP_OPTS_VAL); } - - Level logLevel = - Level.toLevel(conf.get(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, - Level.INFO.toString())); - assertEquals(JobConf.MAPRED_MAP_TASK_LOG_LEVEL + "has value of " + - logLevel, logLevel, Level.OFF); + + String logLevel = conf.get(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, "INFO"); + assertEquals(JobConf.MAPRED_MAP_TASK_LOG_LEVEL + "has value of " + logLevel, logLevel, + "OFF"); } } @@ -108,12 +104,10 @@ public class TestChild extends HadoopTestCase { reduceJavaOpts, reduceJavaOpts, REDUCE_OPTS_VAL); } - - Level logLevel = - Level.toLevel(conf.get(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, - Level.INFO.toString())); - assertEquals(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL + "has value of " + - logLevel, logLevel, Level.OFF); + + String logLevel = conf.get(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, "INFO"); + assertEquals(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL + "has value of " + logLevel, logLevel, + "OFF"); } } @@ -127,9 +121,9 @@ public class TestChild extends HadoopTestCase { conf.set(JobConf.MAPRED_MAP_TASK_JAVA_OPTS, MAP_OPTS_VAL); conf.set(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, REDUCE_OPTS_VAL); } - - conf.set(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, Level.OFF.toString()); - conf.set(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, Level.OFF.toString()); + + conf.set(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, "OFF"); + conf.set(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, "OFF"); Job job = MapReduceTestUtil.createJob(conf, inDir, outDir, numMaps, numReds); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java index 9e58d460d17..d1fc8c04aa1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java @@ -25,6 +25,7 @@ import java.net.InetSocketAddress; import java.security.PrivilegedAction; import 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java
index 9e58d460d17..d1fc8c04aa1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java
@@ -25,6 +25,7 @@ import java.net.InetSocketAddress;
 import java.security.PrivilegedAction;
 import java.security.PrivilegedExceptionAction;
 
+import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.junit.Assert;
@@ -50,8 +51,6 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.Records;
-import org.apache.log4j.Level;
-import org.apache.log4j.LogManager;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -64,8 +63,7 @@ public class TestJHSSecurity {
 
   @Test
   public void testDelegationToken() throws Exception {
-    org.apache.log4j.Logger rootLogger = LogManager.getRootLogger();
-    rootLogger.setLevel(Level.DEBUG);
+    HadoopLoggerUtils.setLogLevel("root", "DEBUG");
 
     final YarnConfiguration conf = new YarnConfiguration(new JobConf());
     // Just a random principle
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
index 43d3abe4f8d..f653ce7c0cd 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
@@ -99,7 +99,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Capacity
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.WorkflowPriorityMappingsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.WorkflowPriorityMappingsManager.WorkflowPriorityMapping;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
-import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -557,9 +556,9 @@ public class TestMRJobs {
           systemClasses);
     }
     sleepConf.set(MRJobConfig.IO_SORT_MB, TEST_IO_SORT_MB);
-    sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, Level.ALL.toString());
-    sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString());
-    sleepConf.set(MRJobConfig.REDUCE_LOG_LEVEL, Level.ALL.toString());
+    sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, "ALL");
+    sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, "ALL");
+    sleepConf.set(MRJobConfig.REDUCE_LOG_LEVEL, "ALL");
     sleepConf.set(MRJobConfig.MAP_JAVA_OPTS, "-verbose:class");
     final SleepJob sleepJob = new SleepJob();
     sleepJob.setConf(sleepConf);
@@ -856,11 +855,11 @@
     final SleepJob sleepJob = new SleepJob();
     final JobConf sleepConf = new JobConf(mrCluster.getConfig());
-    sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString());
+    sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, "ALL");
     final long userLogKb = 4;
     sleepConf.setLong(MRJobConfig.TASK_USERLOG_LIMIT, userLogKb);
     sleepConf.setInt(MRJobConfig.TASK_LOG_BACKUPS, 3);
-    sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, Level.ALL.toString());
+    sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, "ALL");
     final long amLogKb = 7;
     sleepConf.setLong(MRJobConfig.MR_AM_LOG_KB, amLogKb);
     sleepConf.setInt(MRJobConfig.MR_AM_LOG_BACKUPS, 7);
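Throughout the patch, direct log4j Logger/Level manipulation is replaced by HadoopLoggerUtils, so callers no longer import log4j types. A short sketch of the two entry points this patch uses (setLogLevel with a logger name and a level string, and shutdownLogManager); "root" addresses the root logger exactly as in the hunks above:

    import org.apache.hadoop.logging.HadoopLoggerUtils;

    public class LogLevelSketch {
      public static void main(String[] args) {
        // Raise the root logger to DEBUG without touching log4j APIs directly.
        HadoopLoggerUtils.setLogLevel("root", "DEBUG");
        // Quiet a single noisy logger by name instead of the whole hierarchy
        // (the rumen hunk below does this for org.apache.hadoop.conf.Configuration).
        HadoopLoggerUtils.setLogLevel("org.apache.hadoop.conf.Configuration", "OFF");
        // Flush and stop the underlying log manager on shutdown paths.
        HadoopLoggerUtils.shutdownLogManager();
      }
    }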
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index c4dfd2f9d7c..3ebab5a30b8 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1944,6 +1944,18 @@
         <artifactId>log4j-web</artifactId>
         <version>${log4j2.version}</version>
       </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-logging</artifactId>
+        <version>${hadoop.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-logging</artifactId>
+        <version>${hadoop.version}</version>
+        <scope>test</scope>
+        <type>test-jar</type>
+      </dependency>
diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml
index e8c5fb78efd..373b5a07df1 100644
--- a/hadoop-tools/hadoop-azure/pom.xml
+++ b/hadoop-tools/hadoop-azure/pom.xml
@@ -349,7 +349,12 @@
       <artifactId>hamcrest-library</artifactId>
       <scope>test</scope>
-
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-logging</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java
index 1e7330fbd0b..2a124c1c99d 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.azure.NativeAzureFileSystem.FolderRenamePending;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.hadoop.logging.LogCapturer;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java
index 476d7a4f01e..6acab8fe2a0 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java
@@ -23,7 +23,7 @@ import java.util.StringTokenizer;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.hadoop.logging.LogCapturer;
 import org.junit.Test;
 
 import org.slf4j.Logger;
diff --git a/hadoop-tools/hadoop-distcp/pom.xml b/hadoop-tools/hadoop-distcp/pom.xml
index 5194e51d81f..06c2e192f08 100644
--- a/hadoop-tools/hadoop-distcp/pom.xml
+++ b/hadoop-tools/hadoop-distcp/pom.xml
@@ -81,6 +81,12 @@
       <artifactId>hadoop-hdfs-client</artifactId>
       <scope>provided</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-logging</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs</artifactId>
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
index aa42cb968d6..d54fbaa86f2 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.Counter;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.tools.CopyListingFileStatus;
 import org.apache.hadoop.tools.DistCp;
 import org.apache.hadoop.tools.DistCpConstants;
@@ -701,8 +702,8 @@ public abstract class AbstractContractDistCpTest
     GenericTestUtils
         .createFiles(remoteFS, source, getDepth(), getWidth(), getWidth());
 
-    GenericTestUtils.LogCapturer log =
-        GenericTestUtils.LogCapturer.captureLogs(SimpleCopyListing.LOG);
+    LogCapturer log =
+        LogCapturer.captureLogs(SimpleCopyListing.LOG);
 
     String options = "-useiterator -update -delete" + getDefaultCLIOptions();
     DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(),
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java
index 02fd48a071b..661573f9d85 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java
@@ -27,11 +27,10 @@ import java.util.regex.Pattern;
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.tools.rumen.datatypes.*;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
 
 /**
  * A default parser for MapReduce job configuration properties.
@@ -83,7 +82,7 @@ public class MapReduceJobPropertiesParser implements JobPropertyParser {
   // turn off the warning w.r.t deprecated mapreduce keys
   static {
-    Logger.getLogger(Configuration.class).setLevel(Level.OFF);
+    HadoopLoggerUtils.setLogLevel(Configuration.class.getName(), "OFF");
   }
 
   // Accepts a key if there is a corresponding key in the current mapreduce
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 81e888472d8..d901513f2c1 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -576,16 +576,6 @@
-
-
-
-
-
-
-
-
-
-
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index a15c78e4267..b41923ef9de 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -126,7 +127,6 @@ import org.apache.hadoop.yarn.util.SystemClock;
 import org.apache.hadoop.yarn.util.TimelineServiceHelper;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
-import org.apache.log4j.LogManager;
 import org.apache.hadoop.classification.VisibleForTesting;
 
 import com.sun.jersey.api.client.ClientHandlerException;
@@ -403,7 +403,7 @@ public class ApplicationMaster {
       result = appMaster.finish();
     } catch (Throwable t) {
       LOG.error("Error running ApplicationMaster", t);
-      LogManager.shutdown();
+      HadoopLoggerUtils.shutdownLogManager();
       ExitUtil.terminate(1, t);
     } finally {
       if (appMaster != null) {
@@ -529,7 +529,7 @@ public class ApplicationMaster {
     //Check whether customer log4j.properties file exists
     if (fileExist(log4jPath)) {
       try {
-        Log4jPropertyHelper.updateLog4jConfiguration(ApplicationMaster.class,
+        HadoopLoggerUtils.updateLog4jConfiguration(ApplicationMaster.class,
             log4jPath);
       } catch (Exception e) {
         LOG.warn("Can not set up custom log4j properties. " + e);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
index 098f3981cfd..dc23682f1a3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -451,7 +452,7 @@ public class Client {
     if (cliParser.hasOption("log_properties")) {
       String log4jPath = cliParser.getOptionValue("log_properties");
       try {
-        Log4jPropertyHelper.updateLog4jConfiguration(Client.class, log4jPath);
+        HadoopLoggerUtils.updateLog4jConfiguration(Client.class, log4jPath);
       } catch (Exception e) {
         LOG.warn("Can not set up custom log4j properties. " + e);
       }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Log4jPropertyHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Log4jPropertyHelper.java
deleted file mode 100644
index 0301a6880f8..00000000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Log4jPropertyHelper.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.applications.distributedshell;
-
-import java.io.FileInputStream;
-import java.io.InputStream;
-import java.util.Map.Entry;
-import java.util.Properties;
-
-import org.apache.log4j.LogManager;
-import org.apache.log4j.PropertyConfigurator;
-
-public class Log4jPropertyHelper {
-
-  public static void updateLog4jConfiguration(Class<?> targetClass,
-      String log4jPath) throws Exception {
-    Properties customProperties = new Properties();
-    try (
-        FileInputStream fs = new FileInputStream(log4jPath);
-        InputStream is = targetClass.getResourceAsStream("/log4j.properties")) {
-      customProperties.load(fs);
-      Properties originalProperties = new Properties();
-      originalProperties.load(is);
-      for (Entry<Object, Object> entry : customProperties.entrySet()) {
-        originalProperties.setProperty(entry.getKey().toString(), entry
-            .getValue().toString());
-      }
-      LogManager.resetConfiguration();
-      PropertyConfigurator.configure(originalProperties);
-    }
-  }
-}
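The helper deleted above is absorbed into HadoopLoggerUtils.updateLog4jConfiguration, which the ApplicationMaster and Client hunks now call. A minimal sketch of the replacement call, assuming only the signature visible in this patch (a Class plus a properties-file path, throwing Exception); the file name is a placeholder:

    import org.apache.hadoop.logging.HadoopLoggerUtils;

    public class CustomLog4jSetupSketch {
      public static void main(String[] args) throws Exception {
        // Path to a user-supplied log4j.properties; "my-log4j.properties" is illustrative.
        String log4jPath = "my-log4j.properties";
        // Overlays the custom properties onto the log4j.properties found on the
        // classpath of the given class and reconfigures log4j, mirroring what the
        // deleted Log4jPropertyHelper did.
        HadoopLoggerUtils.updateLog4jConfiguration(CustomLog4jSetupSketch.class, log4jPath);
      }
    }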
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java
index 60c06e9aa75..553465313d2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.client.util.YarnClientUtils;
-import org.apache.log4j.Logger;
 import org.eclipse.jetty.server.Server;
 import org.eclipse.jetty.server.ServerConnector;
 import org.eclipse.jetty.servlet.ServletContextHandler;
@@ -52,6 +51,8 @@ import org.eclipse.jetty.util.thread.QueuedThreadPool;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Test Spnego Client Login.
@@ -76,8 +77,7 @@ public class TestSecureApiServiceClient extends KerberosSecurityTestcase {
   private Map<String, String> props;
   private static Server server;
-  private static Logger LOG = Logger
-      .getLogger(TestSecureApiServiceClient.class);
+  private static Logger LOG = LoggerFactory.getLogger(TestSecureApiServiceClient.class);
   private ApiServiceClient asc;
 
   /**
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponent.java
index f8f948dd88f..52ae87671a2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponent.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.yarn.service.api.records.ServiceState;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEvent;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType;
-import org.apache.log4j.Logger;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
@@ -49,8 +48,6 @@ import static org.apache.hadoop.yarn.service.conf.YarnServiceConstants
  */
 public class TestComponent {
 
-  static final Logger LOG = Logger.getLogger(TestComponent.class);
-
   @Rule
   public ServiceTestUtils.ServiceFSWatcher rule =
       new ServiceTestUtils.ServiceFSWatcher();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java
index fa5a5870c4e..4fc87d95b6c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FilterFileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController;
@@ -40,7 +41,6 @@ import org.apache.hadoop.yarn.logaggregation.filecontroller.tfile.LogAggregation
 import org.apache.hadoop.yarn.logaggregation.testutils.LogAggregationTestcase;
 import org.apache.hadoop.yarn.logaggregation.testutils.LogAggregationTestcaseBuilder;
 import org.apache.hadoop.yarn.logaggregation.testutils.LogAggregationTestcaseBuilder.AppDescriptor;
-import org.apache.log4j.Level;
 
 import static org.apache.hadoop.yarn.conf.YarnConfiguration.LOG_AGGREGATION_FILE_CONTROLLER_FMT;
 import static org.apache.hadoop.yarn.logaggregation.LogAggregationTestUtils.enableFileControllers;
@@ -67,7 +67,7 @@ public class TestAggregatedLogDeletionService {
 
   @BeforeAll
   public static void beforeClass() {
-    org.apache.log4j.Logger.getRootLogger().setLevel(Level.DEBUG);
+    HadoopLoggerUtils.setLogLevel("root", "DEBUG");
   }
 
   @BeforeEach
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
index 346239f8e1b..0fd2841fcd0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
@@ -28,6 +28,7 @@ import org.slf4j.LoggerFactory;
 import org.slf4j.Marker;
 import org.slf4j.MarkerFactory;
 
+import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
index 6b0570a32e1..c04fba0a17c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
@@ -22,7 +22,7 @@ import com.google.inject.Inject;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
-import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
+import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java
index 41285467489..05031adc5cd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.GenericsUtil;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.security.AdminACLsManager;
-import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
+import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
index 87d511b1725..8e24e8cd6b5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.yarn.server.nodemanager.webapp;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
-import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
+import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java
index c8496193933..12b6dd7f691 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java
@@ -50,11 +50,12 @@ import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext
 import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
 import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
 import org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext;
-import org.apache.log4j.Logger;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertEquals;
@@ -63,8 +64,7 @@ import static org.junit.Assert.assertFalse;
 
 public class TestContainersMonitorResourceChange {
 
-  static final Logger LOG = Logger
-      .getLogger(TestContainersMonitorResourceChange.class);
+  static final Logger LOG = LoggerFactory.getLogger(TestContainersMonitorResourceChange.class);
   private ContainersMonitorImpl containersMonitor;
   private MockExecutor executor;
   private Configuration conf;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
index 9d096d20c5f..7ea8a6209e7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
@@ -245,6 +245,13 @@
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-logging</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+
     <dependency>
       <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
index dc69eba2bbb..80cc9fc8fd9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
-import org.apache.log4j.Logger;
 
 import java.util.Collections;
 import java.util.HashMap;
@@ -42,6 +41,9 @@ import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.function.LongBinaryOperator;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * In-memory mapping between applications/container-tags and nodes/racks.
  * Required by constrained affinity/anti-affinity and cardinality placement.
@@ -50,8 +52,7 @@
 @InterfaceStability.Unstable
 public class AllocationTagsManager {
 
-  private static final Logger LOG = Logger.getLogger(
-      AllocationTagsManager.class);
+  private static final Logger LOG = LoggerFactory.getLogger(AllocationTagsManager.class);
 
   private ReentrantReadWriteLock.ReadLock readLock;
   private ReentrantReadWriteLock.WriteLock writeLock;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
index c17d4f6d7b0..15e2d34b001 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
@@ -22,7 +22,7 @@ import com.google.inject.Inject;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
-import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
+import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.LI;
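Several of the hunks above make the same move: a static log4j 1.x logger field becomes an SLF4J one, so production classes depend only on the facade rather than a concrete logging backend. A minimal sketch of the before/after, with an illustrative class name:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class Slf4jMigrationSketch {
      // Before: org.apache.log4j.Logger.getLogger(Slf4jMigrationSketch.class)
      // After: the SLF4J facade, leaving the backend to the runtime classpath.
      private static final Logger LOG = LoggerFactory.getLogger(Slf4jMigrationSketch.class);

      public static void main(String[] args) {
        // Parameterized logging avoids string concatenation when the level is disabled.
        LOG.info("allocation tags updated for node {}", "n1");
      }
    }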
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
index 9a85315628f..12b017a921b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.MockApps;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -231,8 +232,8 @@ public class TestFederationRMStateStoreService {
     conf.setInt(YarnConfiguration.FEDERATION_STATESTORE_HEARTBEAT_INITIAL_DELAY, 10);
     conf.set(YarnConfiguration.RM_CLUSTER_ID, subClusterId.getId());
 
-    GenericTestUtils.LogCapturer logCapture =
-        GenericTestUtils.LogCapturer.captureLogs(FederationStateStoreService.LOG);
+    LogCapturer logCapture =
+        LogCapturer.captureLogs(FederationStateStoreService.LOG);
 
     final MockRM rm = new MockRM(conf);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
index a1989d5c0c2..dc2d18d5526 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
@@ -28,17 +28,13 @@ import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileReader;
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
-import org.apache.log4j.AppenderSkeleton;
-import org.apache.log4j.Logger;
-import org.apache.log4j.spi.LoggingEvent;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
@@ -83,6 +79,7 @@ import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.LoggerFactory;
 
 public class TestSystemMetricsPublisherForV2 {
 
@@ -301,42 +298,15 @@ public class TestSystemMetricsPublisherForV2 {
   @Test(timeout = 10000)
   public void testPutEntityWhenNoCollector() throws Exception {
     // Validating the logs as DrainDispatcher won't throw exception
-    class TestAppender extends AppenderSkeleton {
-      private final List<LoggingEvent> log = new ArrayList<>();
-
-      @Override
-      public boolean requiresLayout() {
-        return false;
-      }
-
-      @Override
-      protected void append(final LoggingEvent loggingEvent) {
-        log.add(loggingEvent);
-      }
-
-      @Override
-      public void close() {
-      }
-
-      public List<LoggingEvent> getLog() {
-        return new ArrayList<>(log);
-      }
-    }
-
-    TestAppender appender = new TestAppender();
-    final Logger logger = Logger.getRootLogger();
-    logger.addAppender(appender);
-
+    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
     try {
       RMApp app = createRMApp(ApplicationId.newInstance(0, 1));
       metricsPublisher.appCreated(app, app.getStartTime());
       dispatcher.await();
-      for (LoggingEvent event : appender.getLog()) {
-        assertFalse("Dispatcher Crashed",
-            event.getRenderedMessage().contains("Error in dispatcher thread"));
-      }
+      assertFalse("Dispatcher Crashed",
+          logCapturer.getOutput().contains("Error in dispatcher thread"));
     } finally {
-      logger.removeAppender(appender);
+      logCapturer.stopCapturing();
     }
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java
index 2e7b01ed50d..07630f54618 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java
@@ -18,12 +18,11 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 
+import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework.ProportionalCapacityPreemptionPolicyMockFramework;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
 import org.junit.Test;
 import java.io.IOException;
 import java.util.Map;
@@ -157,7 +156,7 @@ public class TestProportionalCapacityPreemptionPolicyPreemptToBalance
   @Test
   public void testPreemptionToBalanceWithVcoreResource() throws IOException {
-    Logger.getRootLogger().setLevel(Level.DEBUG);
+    HadoopLoggerUtils.setLogLevel("root", "DEBUG");
     String labelsConfig = "=100:100,true"; // default partition
     String nodesConfig = "n1="; // only one node
     String queuesConfig =
@@ -195,7 +194,7 @@
   @Test
   public void testPreemptionToBalanceWithConfiguredTimeout() throws IOException {
-    Logger.getRootLogger().setLevel(Level.DEBUG);
+    HadoopLoggerUtils.setLogLevel("root", "DEBUG");
     String labelsConfig = "=100:100,true"; // default partition
     String nodesConfig = "n1="; // only one node
     String queuesConfig =
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java
index 024ec86f7d7..c6066fd2085 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java
@@ -16,6 +16,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework;
 
+import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy;
 import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.TestProportionalCapacityPreemptionPolicyForNodePartitions;
 import org.slf4j.Logger;
@@ -110,8 +111,7 @@ public class ProportionalCapacityPreemptionPolicyMockFramework {
   public void setup() {
     resetResourceInformationMap();
 
-    org.apache.log4j.Logger.getRootLogger().setLevel(
-        org.apache.log4j.Level.DEBUG);
+    HadoopLoggerUtils.setLogLevel("root", "DEBUG");
 
     conf = new CapacitySchedulerConfiguration(new Configuration(false));
     conf.setLong(
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
index 6aaa15f3e18..c5add68f8ec 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
@@ -25,9 +25,10 @@ import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
-import org.apache.log4j.Logger;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static junit.framework.TestCase.fail;
 
@@ -37,8 +38,7 @@ import static junit.framework.TestCase.fail;
  * the invariant throws in case the invariants are not respected.
  */
 public class TestMetricsInvariantChecker {
-  public final static Logger LOG =
-      Logger.getLogger(TestMetricsInvariantChecker.class);
+  public final static Logger LOG = LoggerFactory.getLogger(TestMetricsInvariantChecker.class);
 
   private MetricsSystem metricsSystem;
   private MetricsInvariantChecker ic;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
index 38fbcd84153..68bbc94f97f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
@@ -29,19 +30,13 @@ import org.apache.hadoop.yarn.util.resource.CustomResourceTypesConfigurationProv
 import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
-import org.apache.log4j.AppenderSkeleton;
-import org.apache.log4j.Level;
-import org.apache.log4j.LogManager;
-import org.apache.log4j.Logger;
-import org.apache.log4j.spi.LoggingEvent;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
+import org.slf4j.LoggerFactory;
 
 import java.util.Collections;
-import java.util.List;
-import java.util.concurrent.CopyOnWriteArrayList;
 
 import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration.parseResourceConfigValue;
 import static org.junit.Assert.assertEquals;
@@ -54,29 +49,6 @@ public class TestFairSchedulerConfiguration {
 
   private static final String A_CUSTOM_RESOURCE = "a-custom-resource";
 
-  private static class TestAppender extends AppenderSkeleton {
-
-    private final List<LoggingEvent> logEvents = new CopyOnWriteArrayList<>();
-
-    @Override
-    public boolean requiresLayout() {
-      return false;
-    }
-
-    @Override
-    public void close() {
-    }
-
-    @Override
-    protected void append(LoggingEvent arg0) {
-      logEvents.add(arg0);
-    }
-
-    private List<LoggingEvent> getLogEvents() {
-      return logEvents;
-    }
-  }
-
   @Rule
   public ExpectedException exception = ExpectedException.none();
@@ -751,9 +723,7 @@
   @Test
   public void testMemoryIncrementConfiguredViaMultipleProperties() {
-    TestAppender testAppender = new TestAppender();
-    Logger logger = LogManager.getRootLogger();
-    logger.addAppender(testAppender);
+    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
     try {
       Configuration conf = new Configuration();
       conf.set("yarn.scheduler.increment-allocation-mb", "7");
@@ -763,23 +733,19 @@
       FairSchedulerConfiguration fsc = new FairSchedulerConfiguration(conf);
       Resource increment = fsc.getIncrementAllocation();
       Assert.assertEquals(13L, increment.getMemorySize());
-      assertTrue("Warning message is not logged when specifying memory " +
-          "increment via multiple properties",
-          testAppender.getLogEvents().stream().anyMatch(
-              e -> e.getLevel() == Level.WARN && ("Configuration " +
-                  "yarn.resource-types.memory-mb.increment-allocation=13 is " +
-                  "overriding the yarn.scheduler.increment-allocation-mb=7 " +
-                  "property").equals(e.getMessage())));
+      assertTrue("Warning message is not logged when specifying memory " +
+          "increment via multiple properties", logCapturer.getOutput().contains("Configuration " +
+          "yarn.resource-types.memory-mb.increment-allocation=13 is " +
+          "overriding the yarn.scheduler.increment-allocation-mb=7 " +
+          "property"));
     } finally {
-      logger.removeAppender(testAppender);
+      logCapturer.stopCapturing();
     }
   }
 
   @Test
   public void testCpuIncrementConfiguredViaMultipleProperties() {
-    TestAppender testAppender = new TestAppender();
-    Logger logger = LogManager.getRootLogger();
-    logger.addAppender(testAppender);
+    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
     try {
       Configuration conf = new Configuration();
       conf.set("yarn.scheduler.increment-allocation-vcores", "7");
@@ -789,15 +755,13 @@
       FairSchedulerConfiguration fsc = new FairSchedulerConfiguration(conf);
       Resource increment = fsc.getIncrementAllocation();
       Assert.assertEquals(13, increment.getVirtualCores());
-      assertTrue("Warning message is not logged when specifying CPU vCores " +
-          "increment via multiple properties",
-          testAppender.getLogEvents().stream().anyMatch(
-              e -> e.getLevel() == Level.WARN && ("Configuration " +
-                  "yarn.resource-types.vcores.increment-allocation=13 is " +
-                  "overriding the yarn.scheduler.increment-allocation-vcores=7 " +
-                  "property").equals(e.getMessage())));
+      assertTrue("Warning message is not logged when specifying CPU vCores " +
+          "increment via multiple properties", logCapturer.getOutput().contains("Configuration " +
+          "yarn.resource-types.vcores.increment-allocation=13 is " +
+          "overriding the yarn.scheduler.increment-allocation-vcores=7 " +
+          "property"));
     } finally {
-      logger.removeAppender(testAppender);
+      logCapturer.stopCapturing();
    }
  }
}
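The TestSystemMetricsPublisherForV2 and TestFairSchedulerConfiguration hunks show the second common shape of this migration: capturing the root logger and asserting that some message does or does not appear anywhere in the captured text. A minimal sketch of that shape, assuming only the APIs the patch itself uses (captureLogs on the logger named "root", getOutput, stopCapturing); the message is a placeholder:

    import static org.junit.Assert.assertFalse;

    import org.apache.hadoop.logging.LogCapturer;
    import org.junit.Test;
    import org.slf4j.LoggerFactory;

    public class RootLogCaptureSketch {
      @Test
      public void testNoErrorIsLogged() {
        // Capturing the logger named "root" observes everything that reaches the
        // root of the hierarchy, as the tests above do.
        LogCapturer capturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
        try {
          // ... exercise the code under test here ...
          assertFalse("unexpected error was logged",
              capturer.getOutput().contains("Error in dispatcher thread"));
        } finally {
          capturer.stopCapturing();
        }
      }
    }

Compared with the removed per-test AppenderSkeleton subclasses, a plain substring check over getOutput() trades exact-message matching for far less boilerplate, which is why most call sites in this patch switch to contains().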