diff --git a/hadoop-common-project/hadoop-auth-examples/pom.xml b/hadoop-common-project/hadoop-auth-examples/pom.xml
index 9a060f75028..4deda432797 100644
--- a/hadoop-common-project/hadoop-auth-examples/pom.xml
+++ b/hadoop-common-project/hadoop-auth-examples/pom.xml
@@ -46,6 +46,16 @@
       <artifactId>slf4j-api</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+      <scope>runtime</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+      <scope>runtime</scope>
+    </dependency>
diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml
index 4cdd6006a46..433a615c606 100644
--- a/hadoop-common-project/hadoop-auth/pom.xml
+++ b/hadoop-common-project/hadoop-auth/pom.xml
@@ -82,14 +82,14 @@
       <scope>compile</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+      <scope>runtime</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+      <scope>runtime</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java
index e18982d75ff..f9c922caac8 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java
@@ -15,7 +15,8 @@
 package org.apache.hadoop.security.authentication.util;
 import java.util.Random;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
 import org.junit.Assert;
 import org.junit.Test;
@@ -29,8 +30,9 @@ public class TestRandomSignerSecretProvider {
   private final int timeout = 500;
   private final long rolloverFrequency = timeout / 2;
-  static {
-    HadoopLoggerUtils.setLogLevel(RolloverSignerSecretProvider.LOG.getName(), "DEBUG");
+  {
+    LogManager.getLogger(
+        RolloverSignerSecretProvider.LOG.getName()).setLevel(Level.DEBUG);
   }
   @Test
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
index d81d1eb3359..628342e40dc 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
@@ -19,7 +19,8 @@
 import java.util.Random;
 import javax.servlet.ServletContext;
 import org.apache.curator.test.TestingServer;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -38,8 +39,9 @@ public class TestZKSignerSecretProvider {
   private final int timeout = 100;
   private final long rolloverFrequency = timeout / 2;
-  static {
-    HadoopLoggerUtils.setLogLevel(RolloverSignerSecretProvider.LOG.getName(), "DEBUG");
+  {
+    LogManager.getLogger(
+        RolloverSignerSecretProvider.LOG.getName()).setLevel(Level.DEBUG);
   }
   @Before
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 426f7a4af41..a9e15d004d4 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -419,16 +419,6 @@
       <artifactId>lz4-java</artifactId>
       <scope>provided</scope>
     </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index 086665151e8..b4eec1fe2cc 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -299,7 +299,7 @@ log4j.appender.NMAUDIT.MaxBackupIndex=${nm.audit.log.maxbackupindex}
 yarn.ewma.cleanupInterval=300
 yarn.ewma.messageAgeLimitSeconds=86400
 yarn.ewma.maxUniqueMessages=250
-log4j.appender.EWMA=org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender
+log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
 log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
 log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
 log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
index cf090eea009..32879597a9c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.http.HttpServer2;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
 import org.apache.hadoop.security.ssl.SSLFactory;
@@ -51,6 +50,8 @@ import org.apache.hadoop.util.GenericsUtil;
 import org.apache.hadoop.util.ServletUtil;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
 /**
  * Change log level in runtime.
@@ -348,7 +349,7 @@
       }
       if (GenericsUtil.isLog4jLogger(logName)) {
-        process(logName, level, out);
+        process(Logger.getLogger(logName), level, out);
       } else {
         out.println("Sorry, setting log level is only supported for log4j loggers.<br />");
       }
@@ -367,17 +368,19 @@
           + "<input type='submit' value='Set Log Level' />"
           + "</form>";
-    private static void process(String log, String level, PrintWriter out) {
+    private static void process(Logger log, String level,
+        PrintWriter out) throws IOException {
       if (level != null) {
-        try {
-          HadoopLoggerUtils.setLogLevel(log, level);
-          out.println(MARKER + "Setting Level to " + level + " ...<br />");
-        } catch (IllegalArgumentException e) {
+        if (!level.equalsIgnoreCase(Level.toLevel(level)
+            .toString())) {
           out.println(MARKER + "Bad Level : <b>" + level + "</b><br />");
+        } else {
+          log.setLevel(Level.toLevel(level));
+          out.println(MARKER + "Setting Level to " + level + " ...<br />");
         }
       }
-      out.println(MARKER + "Effective Level: " + HadoopLoggerUtils.getEffectiveLevel(log)
-          + "<br />");
+      out.println(MARKER
+          + "Effective Level: <b>" + log.getEffectiveLevel() + "</b><br />
"); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java index 3c13feac3ed..3debd36da78 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java @@ -40,8 +40,8 @@ import org.apache.commons.lang3.time.FastDateFormat; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.net.NetUtils; +import org.apache.log4j.LogManager; import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses; @@ -761,7 +761,7 @@ public class StringUtils { public void run() { log.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{ "Shutting down " + classname + " at " + hostname})); - HadoopLoggerUtils.shutdownLogManager(); + LogManager.shutdown(); } }, SHUTDOWN_HOOK_PRIORITY); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java index 913826f3eed..b3487ef309f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java @@ -68,7 +68,6 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration.IntegerRanges; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.alias.CredentialProvider; import org.apache.hadoop.security.alias.CredentialProviderFactory; @@ -77,8 +76,10 @@ import org.apache.hadoop.test.GenericTestUtils; import static org.apache.hadoop.util.PlatformName.IBM_JAVA; +import org.apache.log4j.AppenderSkeleton; +import org.apache.log4j.Logger; +import org.apache.log4j.spi.LoggingEvent; import org.mockito.Mockito; -import org.slf4j.LoggerFactory; public class TestConfiguration { @@ -219,7 +220,9 @@ public class TestConfiguration { InputStream in2 = new ByteArrayInputStream(bytes2); // Attach our own log appender so we can verify output - LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); + TestAppender appender = new TestAppender(); + final Logger logger = Logger.getRootLogger(); + logger.addAppender(appender); try { // Add the 2 different resources - this should generate a warning @@ -227,13 +230,17 @@ public class TestConfiguration { conf.addResource(in2); assertEquals("should see the first value", "A", conf.get("prop")); - String renderedMessage = logCapturer.getOutput(); - assertTrue("did not see expected string inside message " + renderedMessage, - renderedMessage.contains( - "an attempt to override final parameter: " + "prop; Ignoring.")); + List events = appender.getLog(); + assertEquals("overriding a final parameter should cause logging", 1, + events.size()); + LoggingEvent loggingEvent = events.get(0); + String renderedMessage = loggingEvent.getRenderedMessage(); + assertTrue("did not see expected string inside message "+ renderedMessage, + renderedMessage.contains("an attempt to override final parameter: " + + "prop; Ignoring.")); 
} finally { // Make sure the appender is removed - logCapturer.stopCapturing(); + logger.removeAppender(appender); } } @@ -251,7 +258,9 @@ public class TestConfiguration { InputStream in2 = new ByteArrayInputStream(bytes); // Attach our own log appender so we can verify output - LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); + TestAppender appender = new TestAppender(); + final Logger logger = Logger.getRootLogger(); + logger.addAppender(appender); try { // Add the resource twice from a stream - should not generate warnings @@ -259,15 +268,20 @@ public class TestConfiguration { conf.addResource(in2); assertEquals("A", conf.get("prop")); - String appenderOutput = logCapturer.getOutput(); + List events = appender.getLog(); + for (LoggingEvent loggingEvent : events) { + System.out.println("Event = " + loggingEvent.getRenderedMessage()); + } assertTrue("adding same resource twice should not cause logging", - appenderOutput.isEmpty()); + events.isEmpty()); } finally { // Make sure the appender is removed - logCapturer.stopCapturing(); + logger.removeAppender(appender); } } + + @Test public void testFinalWarningsMultiple() throws Exception { // Make a configuration file with a repeated final property @@ -281,19 +295,24 @@ public class TestConfiguration { InputStream in1 = new ByteArrayInputStream(bytes); // Attach our own log appender so we can verify output - LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); + TestAppender appender = new TestAppender(); + final Logger logger = Logger.getRootLogger(); + logger.addAppender(appender); try { // Add the resource - this should not produce a warning conf.addResource(in1); assertEquals("should see the value", "A", conf.get("prop")); - String appenderOutput = logCapturer.getOutput(); + List events = appender.getLog(); + for (LoggingEvent loggingEvent : events) { + System.out.println("Event = " + loggingEvent.getRenderedMessage()); + } assertTrue("adding same resource twice should not cause logging", - appenderOutput.isEmpty()); + events.isEmpty()); } finally { // Make sure the appender is removed - logCapturer.stopCapturing(); + logger.removeAppender(appender); } } @@ -310,20 +329,48 @@ public class TestConfiguration { InputStream in1 = new ByteArrayInputStream(bytes); // Attach our own log appender so we can verify output - LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); + TestAppender appender = new TestAppender(); + final Logger logger = Logger.getRootLogger(); + logger.addAppender(appender); try { // Add the resource - this should produce a warning conf.addResource(in1); assertEquals("should see the value", "A", conf.get("prop")); - String renderedMessage = logCapturer.getOutput(); - assertTrue("did not see expected string inside message " + renderedMessage, - renderedMessage.contains( - "an attempt to override final parameter: " + "prop; Ignoring.")); + List events = appender.getLog(); + assertEquals("overriding a final parameter should cause logging", 1, + events.size()); + LoggingEvent loggingEvent = events.get(0); + String renderedMessage = loggingEvent.getRenderedMessage(); + assertTrue("did not see expected string inside message "+ renderedMessage, + renderedMessage.contains("an attempt to override final parameter: " + + "prop; Ignoring.")); } finally { // Make sure the appender is removed - logCapturer.stopCapturing(); + logger.removeAppender(appender); + } + } + + /** + * A simple appender for white box testing. 
+ */ + private static class TestAppender extends AppenderSkeleton { + private final List log = new ArrayList<>(); + + @Override public boolean requiresLayout() { + return false; + } + + @Override protected void append(final LoggingEvent loggingEvent) { + log.add(loggingEvent); + } + + @Override public void close() { + } + + public List getLog() { + return new ArrayList<>(log); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java index 9e4405f6d18..c016ff03789 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java @@ -36,9 +36,8 @@ import org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater; import org.apache.hadoop.io.compress.zlib.ZlibCompressor; import org.apache.hadoop.io.compress.zlib.ZlibFactory; import org.apache.hadoop.util.NativeCodeLoader; +import org.apache.log4j.Logger; import org.junit.Assert; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; @@ -48,6 +47,9 @@ import static org.junit.Assert.*; public class CompressDecompressTester { + private static final Logger logger = Logger + .getLogger(CompressDecompressTester.class); + private final byte[] originalRawData; private ImmutableList> pairs = ImmutableList.of(); @@ -486,12 +488,12 @@ public class CompressDecompressTester future = null; private AtomicBoolean hasError = null; - private static final Logger LOG = LoggerFactory.getLogger(SourceUpdater.class); + private static final Logger LOG = Logger.getLogger(SourceUpdater.class); public SourceUpdater(MetricsSourceAdapter sourceAdapter, AtomicBoolean err) { @@ -264,7 +263,7 @@ public class TestMetricsSourceAdapter { } catch (Exception e) { // catch all errors hasError.set(true); - LOG.error("Something went wrong.", e); + LOG.error(e.getStackTrace()); } finally { if (hasError.get()) { LOG.error("Hit error, stopping now"); @@ -285,7 +284,7 @@ public class TestMetricsSourceAdapter { private int cnt = 0; private ScheduledFuture future = null; private AtomicBoolean hasError = null; - private static final Logger LOG = LoggerFactory.getLogger(SourceReader.class); + private static final Logger LOG = Logger.getLogger(SourceReader.class); public SourceReader( TestMetricsSource source, MetricsSourceAdapter sourceAdapter, @@ -319,7 +318,7 @@ public class TestMetricsSourceAdapter { } catch (Exception e) { // catch other errors hasError.set(true); - LOG.error("Something went wrong.", e); + LOG.error(e.getStackTrace()); } finally { if (hasError.get()) { future.cancel(false); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java index b1399712e66..8c1339d38d5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java @@ -22,7 +22,7 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.logging.LogCapturer; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell.ExitCodeException; @@ -41,8 +41,8 @@ public class TestShellBasedUnixGroupsMapping { private static final Logger TESTLOG = LoggerFactory.getLogger(TestShellBasedUnixGroupsMapping.class); - private final LogCapturer shellMappingLog = - LogCapturer.captureLogs( + private final GenericTestUtils.LogCapturer shellMappingLog = + GenericTestUtils.LogCapturer.captureLogs( ShellBasedUnixGroupsMapping.LOG); private class TestGroupUserNotExist diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java index 6a6fff89c16..a0ce721ecf0 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java @@ -19,8 +19,6 @@ package org.apache.hadoop.security.ssl; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.logging.LogCapturer; - import org.junit.BeforeClass; import org.junit.Test; @@ -44,7 +42,7 @@ public class TestReloadingX509KeyManager { private static final String BASEDIR = GenericTestUtils.getTempPath( TestReloadingX509TrustManager.class.getSimpleName()); - private final LogCapturer reloaderLog = LogCapturer.captureLogs( + private final GenericTestUtils.LogCapturer reloaderLog = GenericTestUtils.LogCapturer.captureLogs( FileMonitoringTimerTask.LOG); @BeforeClass diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java index 8d2a4c78f5f..63589592f35 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java @@ -19,7 +19,7 @@ package org.apache.hadoop.security.ssl; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.logging.LogCapturer; +import org.apache.hadoop.test.GenericTestUtils.LogCapturer; import java.util.function.Supplier; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java index 839c51c5e10..b7b86b7aa0d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java @@ -18,7 +18,7 @@ package org.apache.hadoop.service; -import org.apache.hadoop.logging.LogCapturer; +import org.apache.hadoop.test.GenericTestUtils.LogCapturer; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; @@ -29,7 +29,7 @@ import org.slf4j.LoggerFactory; import java.io.PrintWriter; -import static org.apache.hadoop.logging.LogCapturer.captureLogs; +import static 
org.apache.hadoop.test.GenericTestUtils.LogCapturer.captureLogs; import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.times; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java index 825fc706f49..e54971e491c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.io.InputStreamReader; import java.io.OutputStream; import java.io.PrintStream; +import java.io.StringWriter; import java.lang.management.ManagementFactory; import java.lang.management.ThreadInfo; import java.lang.management.ThreadMXBean; @@ -37,6 +38,7 @@ import java.util.Locale; import java.util.Objects; import java.util.Random; import java.util.Set; +import java.util.Enumeration; import java.util.TreeSet; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; @@ -51,11 +53,17 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.util.BlockingThreadPoolExecutorService; import org.apache.hadoop.util.DurationInfo; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; +import org.apache.log4j.Appender; +import org.apache.log4j.Layout; +import org.apache.log4j.Level; +import org.apache.log4j.LogManager; +import org.apache.log4j.Logger; +import org.apache.log4j.PatternLayout; +import org.apache.log4j.WriterAppender; import org.junit.Assert; import org.junit.Assume; import org.mockito.invocation.InvocationOnMock; @@ -107,17 +115,51 @@ public abstract class GenericTestUtils { public static final String ERROR_INVALID_ARGUMENT = "Total wait time should be greater than check interval time"; + @Deprecated + public static Logger toLog4j(org.slf4j.Logger logger) { + return LogManager.getLogger(logger.getName()); + } + + /** + * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead + */ + @Deprecated + public static void disableLog(Logger logger) { + logger.setLevel(Level.OFF); + } + public static void disableLog(org.slf4j.Logger logger) { - HadoopLoggerUtils.setLogLevel(logger.getName(), "OFF"); + disableLog(toLog4j(logger)); + } + + public static void setLogLevel(Logger logger, Level level) { + logger.setLevel(level); + } + + /** + * @deprecated + * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead + */ + @Deprecated + public static void setLogLevel(org.slf4j.Logger logger, Level level) { + setLogLevel(toLog4j(logger), level); } public static void setLogLevel(org.slf4j.Logger logger, org.slf4j.event.Level level) { - HadoopLoggerUtils.setLogLevel(logger.getName(), level.toString()); + setLogLevel(toLog4j(logger), Level.toLevel(level.toString())); } public static void setRootLogLevel(org.slf4j.event.Level level) { - HadoopLoggerUtils.setLogLevel("root", level.toString()); + setLogLevel(LogManager.getRootLogger(), Level.toLevel(level.toString())); + } + + public static void setCurrentLoggersLogLevel(org.slf4j.event.Level level) { + for (Enumeration loggers = LogManager.getCurrentLoggers(); + loggers.hasMoreElements();) { + Logger logger = 
(Logger) loggers.nextElement(); + logger.setLevel(Level.toLevel(level.toString())); + } } public static org.slf4j.event.Level toLevel(String level) { @@ -429,6 +471,47 @@ public abstract class GenericTestUtils { } } + public static class LogCapturer { + private StringWriter sw = new StringWriter(); + private WriterAppender appender; + private Logger logger; + + public static LogCapturer captureLogs(org.slf4j.Logger logger) { + if (logger.getName().equals("root")) { + return new LogCapturer(org.apache.log4j.Logger.getRootLogger()); + } + return new LogCapturer(toLog4j(logger)); + } + + public static LogCapturer captureLogs(Logger logger) { + return new LogCapturer(logger); + } + + private LogCapturer(Logger logger) { + this.logger = logger; + Appender defaultAppender = Logger.getRootLogger().getAppender("stdout"); + if (defaultAppender == null) { + defaultAppender = Logger.getRootLogger().getAppender("console"); + } + final Layout layout = (defaultAppender == null) ? new PatternLayout() : + defaultAppender.getLayout(); + this.appender = new WriterAppender(layout, sw); + logger.addAppender(this.appender); + } + + public String getOutput() { + return sw.toString(); + } + + public void stopCapturing() { + logger.removeAppender(appender); + } + + public void clearOutput() { + sw.getBuffer().setLength(0); + } + } + /** * Mockito answer helper that triggers one latch as soon as the * method is called, then waits on another before continuing. diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java index f6f4a448e0e..8489e3d24f3 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java @@ -26,8 +26,6 @@ import org.slf4j.LoggerFactory; import java.util.function.Supplier; import org.slf4j.event.Level; -import org.apache.hadoop.logging.LogCapturer; - import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java index 8375864e5fd..98e182236c9 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java @@ -22,8 +22,8 @@ import java.io.File; import org.junit.Assert; +import org.apache.log4j.Logger; import org.junit.Test; -import org.slf4j.Logger; public class TestClassUtil { @Test(timeout=10000) @@ -35,6 +35,6 @@ public class TestClassUtil { Assert.assertTrue("Containing jar does not exist on file system ", jarFile.exists()); Assert.assertTrue("Incorrect jar file " + containingJar, - jarFile.getName().matches("slf4j-api.*[.]jar")); + jarFile.getName().matches("log4j.*[.]jar")); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java index ec26af66017..1d1ce893a97 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java @@ -28,7 +28,7 @@ import java.util.List; import static org.junit.Assert.*; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.logging.LogCapturer; +import org.apache.hadoop.test.GenericTestUtils.LogCapturer; import org.assertj.core.api.Assertions; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java index fb6221f2704..f43930dd07a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java @@ -28,12 +28,10 @@ import java.util.Iterator; import java.util.Random; import org.junit.Assert; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.util.hash.Hash; +import org.apache.log4j.Logger; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; @@ -115,7 +113,7 @@ public class BloomFilterCommonTester { } interface FilterTesterStrategy { - Logger logger = LoggerFactory.getLogger(FilterTesterStrategy.class); + final Logger logger = Logger.getLogger(FilterTesterStrategy.class); void assertWhat(Filter filter, int numInsertions, int hashId, ImmutableSet falsePositives); diff --git a/hadoop-common-project/hadoop-kms/pom.xml b/hadoop-common-project/hadoop-kms/pom.xml index 8a04c4ebcfb..96588a22b94 100644 --- a/hadoop-common-project/hadoop-kms/pom.xml +++ b/hadoop-common-project/hadoop-kms/pom.xml @@ -53,12 +53,6 @@ hadoop-auth compile - - org.apache.hadoop - hadoop-logging - test - test-jar - org.apache.hadoop.thirdparty hadoop-shaded-guava diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java index 97d854285ff..f4c7fbe0b3c 100644 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java @@ -49,7 +49,6 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.util.Time; import org.apache.http.client.utils.URIBuilder; import org.junit.After; @@ -584,8 +583,8 @@ public class TestKMS { @Test public void testStartStopHttpPseudo() throws Exception { // Make sure bogus errors don't get emitted. 
- LogCapturer logs = - LogCapturer.captureLogs(LoggerFactory.getLogger( + GenericTestUtils.LogCapturer logs = + GenericTestUtils.LogCapturer.captureLogs(LoggerFactory.getLogger( "com.sun.jersey.server.wadl.generators.AbstractWadlGeneratorGrammarGenerator")); try { testStartStop(false, false); diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java index 6e12d946ff3..3d0fd7de642 100644 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java @@ -18,24 +18,23 @@ package org.apache.hadoop.crypto.key.kms.server; import java.io.ByteArrayOutputStream; -import java.io.File; import java.io.FilterOutputStream; +import java.io.InputStream; import java.io.IOException; import java.io.OutputStream; import java.io.PrintStream; -import java.net.URISyntaxException; -import java.net.URL; -import java.nio.file.Paths; import java.util.List; import java.util.concurrent.TimeUnit; import org.apache.commons.lang3.reflect.FieldUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.kms.server.KMS.KMSOp; -import org.apache.hadoop.logging.HadoopLoggerUtils; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; - +import org.apache.hadoop.util.ThreadUtil; +import org.apache.log4j.LogManager; +import org.apache.log4j.PropertyConfigurator; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -68,23 +67,24 @@ public class TestKMSAudit { public final Timeout testTimeout = new Timeout(180000L, TimeUnit.MILLISECONDS); @Before - public void setUp() throws IOException, URISyntaxException { + public void setUp() throws IOException { originalOut = System.err; memOut = new ByteArrayOutputStream(); filterOut = new FilterOut(memOut); capturedOut = new PrintStream(filterOut); System.setErr(capturedOut); - URL url = getClass().getClassLoader().getResource("log4j-kmsaudit.properties"); - File file = Paths.get(url.toURI()).toFile(); - HadoopLoggerUtils.updateLog4jConfiguration(KMSAudit.class, file.getAbsolutePath()); + InputStream is = + ThreadUtil.getResourceAsStream("log4j-kmsaudit.properties"); + PropertyConfigurator.configure(is); + IOUtils.closeStream(is); Configuration conf = new Configuration(); this.kmsAudit = new KMSAudit(conf); } @After - public void cleanUp() throws Exception { + public void cleanUp() { System.setErr(originalOut); - HadoopLoggerUtils.resetConfiguration(); + LogManager.resetConfiguration(); kmsAudit.shutdown(); } diff --git a/hadoop-common-project/hadoop-logging/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-logging/dev-support/findbugsExcludeFile.xml deleted file mode 100644 index 304d1e45157..00000000000 --- a/hadoop-common-project/hadoop-logging/dev-support/findbugsExcludeFile.xml +++ /dev/null @@ -1,23 +0,0 @@ - - - - - - - - - - - - - - - - - - - diff --git a/hadoop-common-project/hadoop-logging/pom.xml b/hadoop-common-project/hadoop-logging/pom.xml deleted file mode 100644 index 20af2bee769..00000000000 --- a/hadoop-common-project/hadoop-logging/pom.xml +++ /dev/null @@ -1,125 +0,0 @@ - - - - - - hadoop-project - org.apache.hadoop - 3.4.0-SNAPSHOT - ../../hadoop-project - - 4.0.0 - - hadoop-logging - 
3.4.0-SNAPSHOT - jar - - Apache Hadoop Logging - Logging Support for Apache Hadoop project - - - UTF-8 - - - - - org.apache.hadoop - hadoop-annotations - provided - - - org.apache.commons - commons-lang3 - - - org.slf4j - slf4j-api - - - junit - junit - test - - - org.slf4j - slf4j-log4j12 - test - - - log4j - log4j - provided - - - - - - - org.apache.maven.plugins - maven-source-plugin - - - prepare-package - - jar - - - - - true - - - - org.apache.maven.plugins - maven-jar-plugin - - - prepare-jar - prepare-package - - jar - - - - prepare-test-jar - prepare-package - - test-jar - - - - - - org.apache.rat - apache-rat-plugin - - - dev-support/findbugsExcludeFile.xml - - - - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/dev-support/findbugsExcludeFile.xml - - - - - - \ No newline at end of file diff --git a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopInternalLog4jUtils.java b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopInternalLog4jUtils.java deleted file mode 100644 index b0bd2e31fcd..00000000000 --- a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopInternalLog4jUtils.java +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.logging; - -import java.io.FileInputStream; -import java.io.Flushable; -import java.io.IOException; -import java.io.InputStream; -import java.io.PrintWriter; -import java.io.StringWriter; -import java.util.Enumeration; -import java.util.Map; -import java.util.Properties; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.log4j.Appender; -import org.apache.log4j.Level; -import org.apache.log4j.LogManager; -import org.apache.log4j.Logger; -import org.apache.log4j.PropertyConfigurator; - -/** - * Hadoop's internal class that access log4j APIs directly. - *
<p/>
- * This class will depend on log4j directly, so callers should not use this class directly to avoid - * introducing log4j dependencies to downstream users. Please call the methods in - * {@link HadoopLoggerUtils}, as they will call the methods here through reflection. - */ -@InterfaceAudience.Private -@InterfaceStability.Unstable -final class HadoopInternalLog4jUtils { - - private HadoopInternalLog4jUtils() { - } - - static void setLogLevel(String loggerName, String levelName) { - if (loggerName == null) { - throw new IllegalArgumentException("logger name cannot be null"); - } - Logger logger = loggerName.equalsIgnoreCase("root") ? - LogManager.getRootLogger() : - LogManager.getLogger(loggerName); - Level level = Level.toLevel(levelName.toUpperCase()); - if (!level.toString().equalsIgnoreCase(levelName)) { - throw new IllegalArgumentException("Unsupported log level " + levelName); - } - logger.setLevel(level); - } - - static void shutdownLogManager() { - LogManager.shutdown(); - } - - static String getEffectiveLevel(String loggerName) { - Logger logger = loggerName.equalsIgnoreCase("root") ? - LogManager.getRootLogger() : - LogManager.getLogger(loggerName); - return logger.getEffectiveLevel().toString(); - } - - static void resetConfiguration() { - LogManager.resetConfiguration(); - } - - static void updateLog4jConfiguration(Class targetClass, String log4jPath) throws Exception { - Properties customProperties = new Properties(); - try (FileInputStream fs = new FileInputStream(log4jPath); - InputStream is = targetClass.getResourceAsStream("/log4j.properties")) { - customProperties.load(fs); - Properties originalProperties = new Properties(); - originalProperties.load(is); - for (Map.Entry entry : customProperties.entrySet()) { - originalProperties.setProperty(entry.getKey().toString(), entry.getValue().toString()); - } - LogManager.resetConfiguration(); - PropertyConfigurator.configure(originalProperties); - } - } - - static boolean hasAppenders(String logger) { - return Logger.getLogger(logger) - .getAllAppenders() - .hasMoreElements(); - } - - @SuppressWarnings("unchecked") - static void syncLogs() { - // flush standard streams - // - System.out.flush(); - System.err.flush(); - - // flush flushable appenders - // - final Logger rootLogger = Logger.getRootLogger(); - flushAppenders(rootLogger); - final Enumeration allLoggers = rootLogger.getLoggerRepository(). - getCurrentLoggers(); - while (allLoggers.hasMoreElements()) { - final Logger l = allLoggers.nextElement(); - flushAppenders(l); - } - } - - @SuppressWarnings("unchecked") - private static void flushAppenders(Logger l) { - final Enumeration allAppenders = l.getAllAppenders(); - while (allAppenders.hasMoreElements()) { - final Appender a = allAppenders.nextElement(); - if (a instanceof Flushable) { - try { - ((Flushable) a).flush(); - } catch (IOException ioe) { - System.err.println(a + ": Failed to flush!" 
- + stringifyException(ioe)); - } - } - } - } - - private static String stringifyException(Throwable e) { - StringWriter stringWriter = new StringWriter(); - PrintWriter printWriter = new PrintWriter(stringWriter); - e.printStackTrace(printWriter); - printWriter.close(); - return stringWriter.toString(); - } - -} diff --git a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopLoggerUtils.java b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopLoggerUtils.java deleted file mode 100644 index 1d0bea17337..00000000000 --- a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopLoggerUtils.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.logging; - -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * A bridge class for operating on logging framework, such as changing log4j log level, etc. - * Will call the methods in {@link HadoopInternalLog4jUtils} to perform operations on log4j level. - */ -@InterfaceAudience.Private -@InterfaceStability.Unstable -public final class HadoopLoggerUtils { - - private static final String INTERNAL_UTILS_CLASS = - "org.apache.hadoop.logging.HadoopInternalLog4jUtils"; - - private HadoopLoggerUtils() { - } - - private static Method getMethod(String methodName, Class... 
args) { - try { - Class clazz = Class.forName(INTERNAL_UTILS_CLASS); - return clazz.getDeclaredMethod(methodName, args); - } catch (ClassNotFoundException | NoSuchMethodException e) { - throw new AssertionError("should not happen", e); - } - } - - private static void throwUnchecked(Throwable throwable) { - if (throwable instanceof RuntimeException) { - throw (RuntimeException) throwable; - } - if (throwable instanceof Error) { - throw (Error) throwable; - } - } - - public static void shutdownLogManager() { - Method method = getMethod("shutdownLogManager"); - try { - method.invoke(null); - } catch (IllegalAccessException e) { - throw new AssertionError("should not happen", e); - } catch (InvocationTargetException e) { - throwUnchecked(e.getCause()); - throw new AssertionError("Failed to execute, should not happen", e.getCause()); - } - } - - public static void setLogLevel(String loggerName, String levelName) { - Method method = getMethod("setLogLevel", String.class, String.class); - try { - method.invoke(null, loggerName, levelName); - } catch (IllegalAccessException e) { - throw new AssertionError("should not happen", e); - } catch (InvocationTargetException e) { - throwUnchecked(e.getCause()); - throw new AssertionError("Failed to execute, should not happen", e.getCause()); - } - } - - public static String getEffectiveLevel(String loggerName) { - Method method = getMethod("getEffectiveLevel", String.class); - try { - return (String) method.invoke(null, loggerName); - } catch (IllegalAccessException e) { - throw new AssertionError("should not happen", e); - } catch (InvocationTargetException e) { - throwUnchecked(e.getCause()); - throw new AssertionError("Failed to execute, should not happen", e.getCause()); - } - } - - public static void resetConfiguration() { - Method method = getMethod("resetConfiguration"); - try { - method.invoke(null); - } catch (IllegalAccessException e) { - throw new AssertionError("should not happen", e); - } catch (InvocationTargetException e) { - throwUnchecked(e.getCause()); - throw new AssertionError("Failed to execute, should not happen", e.getCause()); - } - } - - public static void updateLog4jConfiguration(Class targetClass, String log4jPath) { - Method method = getMethod("updateLog4jConfiguration", Class.class, String.class); - try { - method.invoke(null, targetClass, log4jPath); - } catch (IllegalAccessException e) { - throw new AssertionError("should not happen", e); - } catch (InvocationTargetException e) { - throwUnchecked(e.getCause()); - throw new AssertionError("Failed to execute, should not happen", e.getCause()); - } - } - - public static boolean hasAppenders(String logger) { - Method method = getMethod("hasAppenders", String.class); - try { - return (Boolean) method.invoke(null, logger); - } catch (IllegalAccessException e) { - throw new AssertionError("should not happen", e); - } catch (InvocationTargetException e) { - throwUnchecked(e.getCause()); - throw new AssertionError("Failed to execute, should not happen", e.getCause()); - } - } - - public synchronized static void syncLogs() { - Method method = getMethod("syncLogs"); - try { - method.invoke(null); - } catch (IllegalAccessException e) { - throw new AssertionError("should not happen", e); - } catch (InvocationTargetException e) { - throwUnchecked(e.getCause()); - throw new AssertionError("Failed to execute, should not happen", e.getCause()); - } - } - -} diff --git a/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/LogCapturer.java 
b/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/LogCapturer.java deleted file mode 100644 index 45f5d0ca02d..00000000000 --- a/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/LogCapturer.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.logging; - -import java.io.StringWriter; - -import org.apache.log4j.Appender; -import org.apache.log4j.Layout; -import org.apache.log4j.LogManager; -import org.apache.log4j.Logger; -import org.apache.log4j.PatternLayout; -import org.apache.log4j.WriterAppender; - -public class LogCapturer { - private final StringWriter sw = new StringWriter(); - private final Appender appender; - private final Logger logger; - - public static LogCapturer captureLogs(org.slf4j.Logger logger) { - if (logger.getName().equals("root")) { - return new LogCapturer(Logger.getRootLogger()); - } - return new LogCapturer(LogManager.getLogger(logger.getName())); - } - - private LogCapturer(Logger logger) { - this.logger = logger; - Appender defaultAppender = Logger.getRootLogger().getAppender("stdout"); - if (defaultAppender == null) { - defaultAppender = Logger.getRootLogger().getAppender("console"); - } - final Layout layout = - (defaultAppender == null) ? new PatternLayout() : defaultAppender.getLayout(); - this.appender = new WriterAppender(layout, sw); - logger.addAppender(this.appender); - } - - public String getOutput() { - return sw.toString(); - } - - public void stopCapturing() { - logger.removeAppender(appender); - } - - public void clearOutput() { - sw.getBuffer().setLength(0); - } -} diff --git a/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/test/TestSyncLogs.java b/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/test/TestSyncLogs.java deleted file mode 100644 index 4bafb5a3153..00000000000 --- a/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/test/TestSyncLogs.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.logging.test; - -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.logging.HadoopLoggerUtils; - -public class TestSyncLogs { - - private static final Logger LOG = LoggerFactory.getLogger(TestSyncLogs.class); - - @Test - public void testSyncLogs() { - LOG.info("Testing log sync"); - HadoopLoggerUtils.syncLogs(); - } - -} diff --git a/hadoop-common-project/hadoop-logging/src/test/resources/log4j.properties b/hadoop-common-project/hadoop-logging/src/test/resources/log4j.properties deleted file mode 100644 index ff1468cf43e..00000000000 --- a/hadoop-common-project/hadoop-logging/src/test/resources/log4j.properties +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# log4j configuration used during build and unit tests - -log4j.rootLogger=debug,stdout -log4j.threshold=ALL -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n diff --git a/hadoop-common-project/hadoop-minikdc/pom.xml b/hadoop-common-project/hadoop-minikdc/pom.xml index d2e993343a2..c292aebbe36 100644 --- a/hadoop-common-project/hadoop-minikdc/pom.xml +++ b/hadoop-common-project/hadoop-minikdc/pom.xml @@ -38,6 +38,11 @@ org.apache.kerby kerb-simplekdc + + org.slf4j + slf4j-log4j12 + compile + junit junit diff --git a/hadoop-common-project/pom.xml b/hadoop-common-project/pom.xml index b0fb88874c9..f167a079a9b 100644 --- a/hadoop-common-project/pom.xml +++ b/hadoop-common-project/pom.xml @@ -38,7 +38,6 @@ hadoop-minikdc hadoop-kms hadoop-registry - hadoop-logging diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml index 9a1226ea385..b362e001ea6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml @@ -86,12 +86,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> netty-all test - - org.apache.hadoop - hadoop-logging - test - test-jar - org.mock-server mockserver-netty diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java index d0b86534269..1fe6dcad932 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java @@ -31,7 +31,6 @@ import static org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory.SSL_MONIT import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.test.GenericTestUtils; -import 
org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.util.Lists; import org.junit.Assert; import org.junit.Test; @@ -62,8 +61,8 @@ public final class TestURLConnectionFactory { public void testSSLInitFailure() throws Exception { Configuration conf = new Configuration(); conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "foo"); - LogCapturer logs = - LogCapturer.captureLogs( + GenericTestUtils.LogCapturer logs = + GenericTestUtils.LogCapturer.captureLogs( LoggerFactory.getLogger(URLConnectionFactory.class)); URLConnectionFactory.newDefaultURLConnectionFactory(conf); Assert.assertTrue("Expected log for ssl init failure not found!", diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml index b9aae62bd81..a5bf5c1c318 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml @@ -182,12 +182,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> junit-jupiter-params test - - org.apache.hadoop - hadoop-logging - test - test-jar - diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java index 9f74337d7ae..0741f1aed44 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java @@ -40,7 +40,6 @@ import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys; import org.apache.hadoop.hdfs.server.federation.router.RemoteMethod; import org.apache.hadoop.hdfs.server.federation.router.RouterRpcClient; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.logging.LogCapturer; import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FAIR_HANDLER_COUNT_KEY_PREFIX; import static org.junit.Assert.assertEquals; @@ -49,8 +48,8 @@ public class TestRouterRefreshFairnessPolicyController { private static final Logger LOG = LoggerFactory.getLogger(TestRouterRefreshFairnessPolicyController.class); - private final LogCapturer controllerLog = - LogCapturer.captureLogs(AbstractRouterRpcFairnessPolicyController.LOG); + private final GenericTestUtils.LogCapturer controllerLog = + GenericTestUtils.LogCapturer.captureLogs(AbstractRouterRpcFairnessPolicyController.LOG); private StateStoreDFSCluster cluster; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java index d4f68271350..1f5770b1dda 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java @@ -22,7 +22,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.server.federation.router.FederationUtil; import 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys; -import org.apache.hadoop.logging.LogCapturer; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; import org.junit.Test; import org.slf4j.LoggerFactory; @@ -179,7 +179,7 @@ public class TestRouterRpcFairnessPolicyController { private void verifyInstantiationError(Configuration conf, int handlerCount, int totalDedicatedHandlers) { - LogCapturer logs = LogCapturer + GenericTestUtils.LogCapturer logs = GenericTestUtils.LogCapturer .captureLogs(LoggerFactory.getLogger( StaticRouterRpcFairnessPolicyController.class)); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java index bb81eaa070b..9ee9692aad1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.LogVerificationAppender; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder; @@ -54,7 +55,6 @@ import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver; import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; import org.apache.hadoop.http.HttpConfig; -import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; @@ -322,7 +322,11 @@ public class TestRouterNamenodeMonitoring { int httpsRequests, int requestsPerService) { // Attach our own log appender so we can verify output - LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); + final LogVerificationAppender appender = + new LogVerificationAppender(); + final org.apache.log4j.Logger logger = + org.apache.log4j.Logger.getRootLogger(); + logger.addAppender(appender); GenericTestUtils.setRootLogLevel(Level.DEBUG); // Setup and start the Router @@ -343,11 +347,8 @@ public class TestRouterNamenodeMonitoring { heartbeatService.getNamenodeStatusReport(); } } - assertEquals(2, org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(), - "JMX URL: https://")); - assertEquals(2, org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(), - "JMX URL: http://")); - logCapturer.stopCapturing(); + assertEquals(httpsRequests * 2, appender.countLinesWithMessage("JMX URL: https://")); + assertEquals(httpRequests * 2, appender.countLinesWithMessage("JMX URL: http://")); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java index 3db20a6e180..d3d34216190 100644 --- 
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
@@ -135,8 +135,6 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
-
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.AfterClass;
@@ -2069,8 +2067,8 @@ public class TestRouterRpc {
   @Test
   public void testMkdirsWithCallerContext() throws IOException {
-    LogCapturer auditlog =
-        LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
+    GenericTestUtils.LogCapturer auditlog =
+        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // Current callerContext is null
     assertNull(CallerContext.getCurrent());
@@ -2096,8 +2094,8 @@
   @Test
   public void testRealUserPropagationInCallerContext()
       throws IOException, InterruptedException {
-    LogCapturer auditlog =
-        LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
+    GenericTestUtils.LogCapturer auditlog =
+        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // Current callerContext is null
     assertNull(CallerContext.getCurrent());
@@ -2141,8 +2139,8 @@
   @Test
   public void testAddClientIpPortToCallerContext() throws IOException {
-    LogCapturer auditLog =
-        LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
+    GenericTestUtils.LogCapturer auditLog =
+        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // 1. ClientIp and ClientPort are not set on the client.
     // Set client context.
@@ -2176,8 +2174,8 @@
   @Test
   public void testAddClientIdAndCallIdToCallerContext() throws IOException {
-    LogCapturer auditLog =
-        LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
+    GenericTestUtils.LogCapturer auditLog =
+        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // 1. ClientId and ClientCallId are not set on the client.
     // Set client context.
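[Editor's note, not part of the patch: the hunks above all restore the same GenericTestUtils.LogCapturer idiom. A minimal sketch of that idiom follows; the `dfs` handle and the path are hypothetical, assuming a running MiniDFSCluster.]

    // Sketch only: capture the audit log, run an operation, assert, stop capturing.
    GenericTestUtils.LogCapturer auditLog =
        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
    try {
      dfs.mkdirs(new Path("/example"));                       // hypothetical operation
      assertTrue(auditLog.getOutput().contains("cmd=mkdirs")); // audit line rendered so far
    } finally {
      auditLog.stopCapturing();
    }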
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
index caecb697d6d..336ea391385 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
@@ -72,8 +72,6 @@ import org.apache.hadoop.ipc.CallerContext;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
-
 import org.junit.Test;
 import org.slf4j.event.Level;
@@ -278,10 +276,12 @@
   @Test
   public void testPreviousBlockNotNull()
       throws IOException, URISyntaxException {
-    final LogCapturer stateChangeLog = LogCapturer.captureLogs(NameNode.stateChangeLog);
+    final GenericTestUtils.LogCapturer stateChangeLog =
+        GenericTestUtils.LogCapturer.captureLogs(NameNode.stateChangeLog);
     GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.DEBUG);
 
-    final LogCapturer nameNodeLog = LogCapturer.captureLogs(NameNode.LOG);
+    final GenericTestUtils.LogCapturer nameNodeLog =
+        GenericTestUtils.LogCapturer.captureLogs(NameNode.LOG);
     GenericTestUtils.setLogLevel(NameNode.LOG, Level.DEBUG);
 
     final FederationRPCMetrics metrics = getRouterContext().
@@ -454,8 +454,8 @@
   @Test
   public void testCallerContextWithMultiDestinations() throws IOException {
-    LogCapturer auditLog =
-        LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
+    GenericTestUtils.LogCapturer auditLog =
+        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // set client context
     CallerContext.setCurrent(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index 5c2df9acf4e..8632c567aa1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -310,4 +310,14 @@
+
+
+
+
+
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index a8922cbcff3..5f156499ee0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -164,12 +164,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>hadoop-minikdc</artifactId>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
     <dependency>
       <groupId>org.mockito</groupId>
       <artifactId>mockito-core</artifactId>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java
index a361a280e3e..21c01cebd40 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java
@@ -31,8 +31,6 @@ import javax.management.ObjectName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.metrics2.util.MBeans;
 
 /**
@@ -113,8 +111,11 @@ public class MetricsLoggerTask implements Runnable {
             .substring(0, maxLogLineLength) + "...");
   }
 
+  // TODO : hadoop-logging module to hide log4j implementation details, this method
+  // can directly call utility from hadoop-logging.
   private static boolean hasAppenders(Logger logger) {
-    return HadoopLoggerUtils.hasAppenders(logger.getName());
+    return org.apache.log4j.Logger.getLogger(logger.getName()).getAllAppenders()
+        .hasMoreElements();
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java
index 4e8daf319a6..ab301104f2e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java
@@ -32,11 +32,11 @@ import org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics;
 import org.apache.hadoop.hdfs.server.namenode.visitor.INodeCountVisitor;
 import org.apache.hadoop.hdfs.server.namenode.visitor.INodeCountVisitor.Counts;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.util.GSet;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.log4j.Level;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -110,13 +110,13 @@
   }
 
   static void initLogLevels() {
-    Util.setLogLevel(FSImage.class, "TRACE");
-    Util.setLogLevel(FileJournalManager.class, "TRACE");
+    Util.setLogLevel(FSImage.class, Level.TRACE);
+    Util.setLogLevel(FileJournalManager.class, Level.TRACE);
 
-    Util.setLogLevel(GSet.class, "OFF");
-    Util.setLogLevel(BlockManager.class, "OFF");
-    Util.setLogLevel(DatanodeManager.class, "OFF");
-    Util.setLogLevel(TopMetrics.class, "OFF");
+    Util.setLogLevel(GSet.class, Level.OFF);
+    Util.setLogLevel(BlockManager.class, Level.OFF);
+    Util.setLogLevel(DatanodeManager.class, Level.OFF);
+    Util.setLogLevel(TopMetrics.class, Level.OFF);
   }
 
   static class Util {
@@ -127,10 +127,11 @@
           + ", max=" + StringUtils.byteDesc(runtime.maxMemory());
     }
 
-    static void setLogLevel(Class<?> clazz, String level) {
-      HadoopLoggerUtils.setLogLevel(clazz.getName(), level);
+    static void setLogLevel(Class<?> clazz, Level level) {
+      final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(clazz);
+      logger.setLevel(level);
       LOG.info("setLogLevel {} to {}, getEffectiveLevel() = {}", clazz.getName(), level,
-          HadoopLoggerUtils.getEffectiveLevel(clazz.getName()));
+          logger.getEffectiveLevel());
     }
 
     static String toCommaSeparatedNumber(long n) {
diff --git a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/AsyncRFAAppender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AsyncRFAAppender.java
similarity index 98%
rename from hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/AsyncRFAAppender.java
rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AsyncRFAAppender.java
index 2abfffb474b..276e5b0987a 100644
--- a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/AsyncRFAAppender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AsyncRFAAppender.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.logging.appenders;
+package org.apache.hadoop.hdfs.util;
 
 import java.io.IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java
new file mode 100644
index 00000000000..10ef47bbbc3
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.spi.LoggingEvent;
+import org.apache.log4j.spi.ThrowableInformation;
+
+/**
+ * Used to verify that certain exceptions or messages are present in log output.
+ */
+public class LogVerificationAppender extends AppenderSkeleton {
+  private final List<LoggingEvent> log = new ArrayList<LoggingEvent>();
+
+  @Override
+  public boolean requiresLayout() {
+    return false;
+  }
+
+  @Override
+  protected void append(final LoggingEvent loggingEvent) {
+    log.add(loggingEvent);
+  }
+
+  @Override
+  public void close() {
+  }
+
+  public List<LoggingEvent> getLog() {
+    return new ArrayList<LoggingEvent>(log);
+  }
+
+  public int countExceptionsWithMessage(final String text) {
+    int count = 0;
+    for (LoggingEvent e: getLog()) {
+      ThrowableInformation t = e.getThrowableInformation();
+      if (t != null) {
+        String m = t.getThrowable().getMessage();
+        if (m.contains(text)) {
+          count++;
+        }
+      }
+    }
+    return count;
+  }
+
+  public int countLinesWithMessage(final String text) {
+    int count = 0;
+    for (LoggingEvent e: getLog()) {
+      String msg = e.getRenderedMessage();
+      if (msg != null && msg.contains(text)) {
+        count++;
+      }
+    }
+    return count;
+  }
+}
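[Editor's note, not part of the patch: a minimal usage sketch of the LogVerificationAppender restored above. The triggering call is hypothetical; the appender and root-logger APIs are plain log4j 1.x.]

    // Sketch only: attach to the root logger, exercise the code, count events.
    LogVerificationAppender appender = new LogVerificationAppender();
    org.apache.log4j.Logger.getRootLogger().addAppender(appender);
    try {
      somethingThatLogsAWarning();  // hypothetical code under test
      assertEquals(1, appender.countLinesWithMessage("expected fragment"));
    } finally {
      org.apache.log4j.Logger.getRootLogger().removeAppender(appender);
    }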
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
index 75ad5bd862f..b16f0237b1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
@@ -33,8 +33,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.logging.LogCapturer;
-
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 
 public class TestDFSRename {
@@ -190,8 +189,8 @@
     final DistributedFileSystem dfs = cluster.getFileSystem();
     Path path = new Path("/test");
     dfs.mkdirs(path);
-    LogCapturer auditLog =
-        LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
+    GenericTestUtils.LogCapturer auditLog =
+        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
     dfs.rename(path, new Path("/dir1"),
         new Rename[] {Rename.OVERWRITE, Rename.TO_TRASH});
     String auditOut = auditLog.getOutput();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
index 80424a388b7..5469ebbb757 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
@@ -45,9 +45,9 @@ import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.IllegalReservedPathException;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.log4j.Logger;
 import org.junit.Test;
 
 import static org.junit.Assert.*;
@@ -317,7 +317,9 @@
         "imageMD5Digest", "22222222222222222222222222222222");
 
     // Attach our own log appender so we can verify output
-    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+    final LogVerificationAppender appender = new LogVerificationAppender();
+    final Logger logger = Logger.getRootLogger();
+    logger.addAppender(appender);
 
     // Upgrade should now fail
     try {
@@ -329,10 +331,9 @@
       if (!msg.contains("Failed to load FSImage file")) {
         throw ioe;
       }
-      int md5failures = org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(),
+      int md5failures = appender.countExceptionsWithMessage(
           " is corrupt with MD5 checksum of ");
       assertEquals("Upgrade did not fail with bad MD5", 1, md5failures);
-      logCapturer.stopCapturing();
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java
index c792386c0ed..c57ef941f0e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -48,7 +48,7 @@ public class TestDataStream {
   @Test(timeout = 60000)
   public void testDfsClient() throws IOException, InterruptedException {
-    LogCapturer logs = LogCapturer.captureLogs(LoggerFactory
+    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(LoggerFactory
         .getLogger(DataStreamer.class));
     byte[] toWrite = new byte[PACKET_SIZE];
     new Random(1).nextBytes(toWrite);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
index 4299c111967..f9336fcfdc7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
@@ -51,7 +51,7 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.junit.After;
 import org.junit.Before;
@@ -168,9 +168,9 @@ public class TestEncryptedTransfer {
     FileChecksum checksum = writeUnencryptedAndThenRestartEncryptedCluster();
 
-    LogCapturer logs = LogCapturer.captureLogs(
+    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
         LoggerFactory.getLogger(SaslDataTransferServer.class));
-    LogCapturer logs1 = LogCapturer.captureLogs(
+    LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
         LoggerFactory.getLogger(DataTransferSaslUtil.class));
     try {
       assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
@@ -239,7 +239,7 @@
     Mockito.doReturn(false).when(spyClient).shouldEncryptData();
     DFSClientAdapter.setDFSClient((DistributedFileSystem) fs, spyClient);
 
-    LogCapturer logs = LogCapturer.captureLogs(
+    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
         LoggerFactory.getLogger(DataNode.class));
     try {
       assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
@@ -457,9 +457,9 @@
     fs = getFileSystem(conf);
 
-    LogCapturer logs = LogCapturer.captureLogs(
+    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
         LoggerFactory.getLogger(SaslDataTransferServer.class));
-    LogCapturer logs1 = LogCapturer.captureLogs(
+    LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
         LoggerFactory.getLogger(DataTransferSaslUtil.class));
     try {
       writeTestDataToFile(fs);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
index c6561287bb0..3dd0b7eb99e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
@@ -54,7 +54,7 @@ import org.apache.hadoop.http.HttpConfig.Policy;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Rule;
@@ -138,7 +138,7 @@ public class TestSaslDataTransfer extends SaslDataTransferTestCase {
     HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
     clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "");
-    LogCapturer logs = LogCapturer.captureLogs(
+    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
         LoggerFactory.getLogger(DataNode.class));
     try {
       doTest(clientConf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java
index 84b7c8f224c..82b8b587694 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.test.PathUtils;
 import org.junit.After;
 import org.junit.Before;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
index 5d2a9270640..d69051c8d7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
@@ -56,7 +56,7 @@ import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.ha.ObserverReadProxyProvider;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.Test;
 import org.slf4j.LoggerFactory;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
index 7e926a994f3..d32cde83473 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.test.Whitebox;
 import org.assertj.core.api.Assertions;
 
@@ -236,8 +235,8 @@
   public void testCheckSafeMode9() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_RECHECK_INTERVAL_KEY, 3000);
-    LogCapturer logs =
-        LogCapturer.captureLogs(BlockManagerSafeMode.LOG);
+    GenericTestUtils.LogCapturer logs =
+        GenericTestUtils.LogCapturer.captureLogs(BlockManagerSafeMode.LOG);
     BlockManagerSafeMode blockManagerSafeMode = new BlockManagerSafeMode(bm,
         fsn, true, conf);
     String content = logs.getOutput();
@@ -248,8 +247,8 @@
   public void testCheckSafeMode10(){
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_RECHECK_INTERVAL_KEY, -1);
-    LogCapturer logs =
-        LogCapturer.captureLogs(BlockManagerSafeMode.LOG);
+    GenericTestUtils.LogCapturer logs =
+        GenericTestUtils.LogCapturer.captureLogs(BlockManagerSafeMode.LOG);
     BlockManagerSafeMode blockManagerSafeMode = new BlockManagerSafeMode(bm,
         fsn, true, conf);
     String content = logs.getOutput();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
index 87c83836e78..ea7347f9e50 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
@@ -58,7 +58,7 @@ import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStat
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.slf4j.LoggerFactory;
@@ -575,7 +575,7 @@
         new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     cluster.waitActive();
     DFSTestUtil.setNameNodeLogLevel(Level.DEBUG);
-    LogCapturer logs = LogCapturer
+    LogCapturer logs = GenericTestUtils.LogCapturer
         .captureLogs(LoggerFactory.getLogger("BlockStateChange"));
     BlockManager bm = cluster.getNamesystem().getBlockManager();
     try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index c4b5f7aa6a0..20163cc5fa5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -21,6 +21,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSI
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
@@ -40,7 +41,6 @@
 import java.util.Set;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicLong;
 
-import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.fs.ContentSummary;
@@ -49,6 +49,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -66,15 +67,16 @@
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.TestINodeFile;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
-import org.slf4j.LoggerFactory;
 
 @RunWith(Parameterized.class)
 public class TestReplicationPolicy extends BaseReplicationPolicyTest {
@@ -505,26 +507,26 @@
           2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
           (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
     }
-
-    final LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
-
+
+    final LogVerificationAppender appender = new LogVerificationAppender();
+    final Logger logger = Logger.getRootLogger();
+    logger.addAppender(appender);
+
     // try to choose NUM_OF_DATANODES which is more than actually available
     // nodes.
     DatanodeStorageInfo[] targets = chooseTarget(dataNodes.length);
     assertEquals(targets.length, dataNodes.length - 2);
 
-    boolean isFound = false;
-    for (String logLine : logCapturer.getOutput().split("\n")) {
-      // Suppose to place replicas on each node but two data nodes are not
-      // available for placing replica, so here we expect a short of 2
-      if(logLine.contains("WARN") && logLine.contains("in need of 2")) {
-        isFound = true;
-        break;
-      }
-    }
-    assertTrue("Could not find the block placement log specific to 2 datanodes not being "
-        + "available for placing replicas", isFound);
-    logCapturer.stopCapturing();
+    final List<LoggingEvent> log = appender.getLog();
+    assertNotNull(log);
+    assertFalse(log.size() == 0);
+    final LoggingEvent lastLogEntry = log.get(log.size() - 1);
+
+    assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
+    // Suppose to place replicas on each node but two data nodes are not
+    // available for placing replica, so here we expect a short of 2
+    assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2"));
+
     resetHeartbeatForStorages();
   }
 
@@ -1708,14 +1710,17 @@
   @Test
   public void testChosenFailureForStorageType() {
-    final LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+    final LogVerificationAppender appender = new LogVerificationAppender();
+    final Logger logger = Logger.getRootLogger();
+    logger.addAppender(appender);
+
     DatanodeStorageInfo[] targets = replicator.chooseTarget(filename, 1,
         dataNodes[0], new ArrayList<DatanodeStorageInfo>(), false, null,
         BLOCK_SIZE, TestBlockStoragePolicy.POLICY_SUITE.getPolicy(
             HdfsConstants.StoragePolicy.COLD.value()), null);
     assertEquals(0, targets.length);
     assertNotEquals(0,
-        StringUtils.countMatches(logCapturer.getOutput(), "NO_REQUIRED_STORAGE_TYPE"));
+        appender.countLinesWithMessage("NO_REQUIRED_STORAGE_TYPE"));
   }
 
   @Test
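[Editor's note, not part of the patch: the restored assertion above relies on log4j 1.x priority ordering, which reads subtly. A hedged aside for reviewers:]

    // log4j 1.x orders priorities DEBUG < INFO < WARN < ERROR < FATAL.
    // Level.WARN.isGreaterOrEqual(e.getLevel()) is true when the event is at
    // WARN *or below*; to require "WARN or above", compare the other way.
    boolean warnOrBelow = Level.WARN.isGreaterOrEqual(Level.INFO);   // true
    boolean warnOrAbove = Level.ERROR.isGreaterOrEqual(Level.WARN);  // true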
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
index 13efcf783a9..73201ba6054 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
@@ -27,6 +27,7 @@ import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.Collections;
+import java.util.List;
 import java.util.Random;
 import java.util.concurrent.TimeoutException;
 
@@ -38,15 +39,19 @@
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.hdfs.server.namenode.PatternMatchingAppender;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Appender;
+import org.apache.log4j.AsyncAppender;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
 
+import java.util.function.Supplier;
+
 /**
  * Test periodic logging of DataNode metrics.
  */
@@ -123,13 +128,13 @@
   }
 
   @Test
-  @SuppressWarnings("unchecked")
   public void testMetricsLoggerIsAsync() throws IOException {
     startDNForTest(true);
     assertNotNull(dn);
-    assertTrue(Collections.list(
-        org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME).getAllAppenders())
-        .get(0) instanceof org.apache.log4j.AsyncAppender);
+    org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME);
+    @SuppressWarnings("unchecked")
+    List<Appender> appenders = Collections.list(logger.getAllAppenders());
+    assertTrue(appenders.get(0) instanceof AsyncAppender);
   }
 
   /**
@@ -144,15 +149,27 @@
         metricsProvider);
     startDNForTest(true);
     assertNotNull(dn);
-    LogCapturer logCapturer =
-        LogCapturer.captureLogs(LoggerFactory.getLogger(DataNode.METRICS_LOG_NAME));
+    final PatternMatchingAppender appender =
+        (PatternMatchingAppender) org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME)
+            .getAppender("PATTERNMATCHERAPPENDER");
+
     // Ensure that the supplied pattern was matched.
-    GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains("FakeMetric"),
-        1000, 60000);
-    logCapturer.stopCapturing();
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return appender.isMatched();
+      }
+    }, 1000, 60000);
+
     dn.shutdown();
   }
 
+  private void addAppender(org.apache.log4j.Logger logger, Appender appender) {
+    @SuppressWarnings("unchecked")
+    List<Appender> appenders = Collections.list(logger.getAllAppenders());
+    ((AsyncAppender) appenders.get(0)).addAppender(appender);
+  }
+
   public interface TestFakeMetricMXBean {
     int getFakeMetric();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index 82d7a815748..74c70cec769 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -27,6 +27,7 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
+import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
@@ -76,9 +77,10 @@
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.LazyPersistTestCase;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.util.Time;
+import org.apache.log4j.SimpleLayout;
+import org.apache.log4j.WriterAppender;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -412,9 +414,14 @@
   @Test(timeout=600000)
   public void testScanDirectoryStructureWarn() throws Exception {
-    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
 
     //add a logger stream to check what has printed to log
+    ByteArrayOutputStream loggerStream = new ByteArrayOutputStream();
+    org.apache.log4j.Logger rootLogger =
+        org.apache.log4j.Logger.getRootLogger();
     GenericTestUtils.setRootLogLevel(Level.INFO);
+    WriterAppender writerAppender =
+        new WriterAppender(new SimpleLayout(), loggerStream);
+    rootLogger.addAppender(writerAppender);
 
     Configuration conf = getConfiguration();
     cluster = new MiniDFSCluster
@@ -445,7 +452,7 @@
       scan(1, 1, 0, 1, 0, 0, 0);
 
       //ensure the warn log not appear and missing block log do appear
-      String logContent = logCapturer.getOutput();
+      String logContent = new String(loggerStream.toByteArray());
       String missingBlockWarn = "Deleted a metadata file" +
           " for the deleted block";
       String dirStructureWarnLog = " found in invalid directory." +
@@ -457,7 +464,6 @@
       LOG.info("check pass");
 
     } finally {
-      logCapturer.stopCapturing();
       if (scanner != null) {
         scanner.shutdown();
         scanner = null;
@@ -520,7 +526,7 @@
     client = cluster.getFileSystem().getClient();
     conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY, 1);
     // log trace
-    LogCapturer logCapturer = LogCapturer.
+    GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer.
         captureLogs(NameNode.stateChangeLog);
     // Add files with 5 blocks
     createFile(GenericTestUtils.getMethodName(), BLOCK_LENGTH * 5, false);
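[Editor's note, not part of the patch: the TestDirectoryScanner hunks above restore the plain log4j 1.x "capture to an in-memory stream" idiom. A minimal sketch, assuming log4j 1.2 on the classpath; the code under test is hypothetical:]

    // Sketch only: render log events through a layout into a byte stream.
    ByteArrayOutputStream loggerStream = new ByteArrayOutputStream();
    WriterAppender writerAppender = new WriterAppender(new SimpleLayout(), loggerStream);
    org.apache.log4j.Logger.getRootLogger().addAppender(writerAppender);
    try {
      runCodeUnderTest();  // hypothetical
      String logContent = new String(loggerStream.toByteArray());
      assertTrue(logContent.contains("expected message"));
    } finally {
      org.apache.log4j.Logger.getRootLogger().removeAppender(writerAppender);
    }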
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java
index c7fc71f5375..8b1a6c0814c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java
@@ -18,8 +18,6 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
 import net.jcip.annotations.NotThreadSafe;
-
-import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
@@ -53,6 +51,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -80,10 +79,10 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.MetricsAsserts;
+import org.apache.log4j.Logger;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -394,7 +393,9 @@
     }
 
     // nth file should hit a capacity exception
-    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+    final LogVerificationAppender appender = new LogVerificationAppender();
+    final Logger logger = Logger.getRootLogger();
+    logger.addAppender(appender);
     setHeartbeatResponse(cacheBlocks(fileLocs[numFiles-1]));
 
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
@@ -402,12 +403,11 @@
       public Boolean get() {
         // check the log reported by FsDataSetCache
         // in the case that cache capacity is exceeded.
-        int lines = StringUtils.countMatches(logCapturer.getOutput(),
+        int lines = appender.countLinesWithMessage(
             "could not reserve more bytes in the cache: ");
         return lines > 0;
       }
     }, 500, 30000);
-    logCapturer.stopCapturing();
 
     // Also check the metrics for the failure
     assertTrue("Expected more than 0 failed cache attempts",
        fsd.getNumBlocksFailedToCache() > 0);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
index 8f3ef447a6e..073bb532ddf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
@@ -16,7 +16,6 @@
  */
 package org.apache.hadoop.hdfs.server.diskbalancer;
 
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.util.Preconditions;
 import java.util.function.Supplier;
 import org.apache.commons.codec.digest.DigestUtils;
@@ -322,7 +321,7 @@
         0);
     DFSTestUtil.waitReplication(fs, filePath, (short) 1);
 
-    LogCapturer logCapturer = LogCapturer
+    GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
         .captureLogs(DiskBalancer.LOG);
 
     try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/PatternMatchingAppender.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/PatternMatchingAppender.java
new file mode 100644
index 00000000000..f099dfae733
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/PatternMatchingAppender.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.regex.Pattern;
+
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.spi.LoggingEvent;
+
+/**
+ * An appender that matches logged messages against the given
+ * regular expression.
+ */
+public class PatternMatchingAppender extends AppenderSkeleton {
+  private final Pattern pattern;
+  private volatile boolean matched;
+
+  public PatternMatchingAppender() {
+    this.pattern = Pattern.compile("^.*FakeMetric.*$");
+    this.matched = false;
+  }
+
+  public boolean isMatched() {
+    return matched;
+  }
+
+  @Override
+  protected void append(LoggingEvent event) {
+    if (pattern.matcher(event.getMessage().toString()).matches()) {
+      matched = true;
+    }
+  }
+
+  @Override
+  public void close() {
+  }
+
+  @Override
+  public boolean requiresLayout() {
+    return false;
+  }
+}
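[Editor's note, not part of the patch: the metrics-logger tests below fetch this appender by name rather than constructing it, which assumes it is wired up in the test log4j.properties. A hedged sketch of that lookup; the logger name is hypothetical:]

    // Illustrative only: the appender is assumed to be attached to the metrics
    // logger in log4j.properties under the name PATTERNMATCHERAPPENDER.
    org.apache.log4j.Logger metricsLog =
        org.apache.log4j.Logger.getLogger("NameNodeMetricsLog");  // hypothetical name
    PatternMatchingAppender appender =
        (PatternMatchingAppender) metricsLog.getAppender("PATTERNMATCHERAPPENDER");
    if (appender != null && appender.isMatched()) {
      // the hard-coded "FakeMetric" pattern has been seen in the log stream
    }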
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
index 617f38a63f1..c00649a9db5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.authorize.ProxyServers;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.util.Lists;
 import org.junit.Before;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
index fec16c13fd9..d34d6ca7379 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
index 953d1ef7c02..0f736696751 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
@@ -24,6 +24,7 @@ import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.List;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
@@ -38,9 +39,12 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.log4j.Appender;
+import org.apache.log4j.AsyncAppender;
+import org.apache.log4j.Logger;
 import org.junit.After;
 import org.junit.AfterClass;
 
@@ -103,7 +107,6 @@ public class TestAuditLogs {
   UserGroupInformation userGroupInfo;
 
   @Before
-  @SuppressWarnings("unchecked")
   public void setupCluster() throws Exception {
     // must configure prior to instantiating the namesystem because it
     // will reconfigure the logger if async is enabled
@@ -119,9 +122,11 @@
     util.createFiles(fs, fileName);
 
     // make sure the appender is what it's supposed to be
-    assertTrue(Collections.list(org.apache.log4j.Logger.getLogger(
-        "org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit").getAllAppenders())
-        .get(0) instanceof org.apache.log4j.AsyncAppender);
+    Logger logger = org.apache.log4j.Logger.getLogger(
+        "org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit");
+    @SuppressWarnings("unchecked")
+    List<Appender> appenders = Collections.list(logger.getAllAppenders());
+    assertTrue(appenders.get(0) instanceof AsyncAppender);
 
     fnames = util.getFileNames(fileName);
     util.waitReplication(fs, fileName, (short)3);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index ccc6be33c9c..d675dcda988 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -82,7 +82,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.ExitUtil.ExitException;
@@ -863,7 +863,7 @@
       savedSd = sd;
     }
 
-    LogCapturer logs = LogCapturer.captureLogs(
+    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
         LoggerFactory.getLogger(Storage.class));
     try {
       // try to lock the storage that's already locked
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
index 73aee349da1..771caefd20a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
@@ -49,7 +49,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index c68ad185707..17803a07869 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -83,7 +83,6 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException;
 import org.apache.hadoop.hdfs.util.XMLUtils.Stanza;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ExitUtil;
@@ -91,6 +90,9 @@
 import org.apache.hadoop.util.ExitUtil.ExitException;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.spi.LoggingEvent;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -1715,13 +1717,36 @@
     }
   }
 
+  class TestAppender extends AppenderSkeleton {
+    private final List<LoggingEvent> log = new ArrayList<>();
+
+    @Override
+    public boolean requiresLayout() {
+      return false;
+    }
+
+    @Override
+    protected void append(final LoggingEvent loggingEvent) {
+      log.add(loggingEvent);
+    }
+
+    @Override
+    public void close() {
+    }
+
+    public List<LoggingEvent> getLog() {
+      return new ArrayList<>(log);
+    }
+  }
+
   /**
    *
    * @throws Exception
    */
   @Test
   public void testReadActivelyUpdatedLog() throws Exception {
-    final LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+    final TestAppender appender = new TestAppender();
+    LogManager.getRootLogger().addAppender(appender);
     Configuration conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     // Set single handler thread, so all transactions hit same thread-local ops.
@@ -1769,16 +1794,21 @@
       rwf.close();
 
       events.poll();
-      for (String logLine : logCapturer.getOutput().split("\n")) {
-        if (logLine != null && logLine.contains("Caught exception after reading")) {
+      String pattern = "Caught exception after reading (.*) ops";
+      Pattern r = Pattern.compile(pattern);
+      final List<LoggingEvent> log = appender.getLog();
+      for (LoggingEvent event : log) {
+        Matcher m = r.matcher(event.getRenderedMessage());
+        if (m.find()) {
           fail("Should not try to read past latest syned edit log op");
         }
       }
+
     } finally {
       if (cluster != null) {
         cluster.shutdown();
       }
-      logCapturer.stopCapturing();
+      LogManager.getRootLogger().removeAppender(appender);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
index fb484cd3ea0..3b15c2db7a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
@@ -26,8 +26,6 @@ import java.io.IOException;
 
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
-
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -120,8 +118,8 @@
     op3.setTransactionId(3);
     buffer.writeOp(op3, fakeLogVersion);
 
-    LogCapturer logs =
-        LogCapturer.captureLogs(EditsDoubleBuffer.LOG);
+    GenericTestUtils.LogCapturer logs =
+        GenericTestUtils.LogCapturer.captureLogs(EditsDoubleBuffer.LOG);
     try {
       buffer.close();
       fail();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index 860e6b0b256..89193ca6633 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -64,7 +64,7 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.FakeTimer;
 import org.slf4j.event.Level;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
index afb049156e3..f0ae1810167 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.test.MetricsAsserts;
 import org.apache.hadoop.util.FakeTimer;
 import org.apache.hadoop.util.Time;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java
index 08c9240f26b..9c77f9d92b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java
@@ -29,8 +29,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
-
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -60,7 +58,7 @@ public class TestFSNamesystemLockReport {
   private MiniDFSCluster cluster;
   private FileSystem fs;
   private UserGroupInformation userGroupInfo;
-  private LogCapturer logs;
+  private GenericTestUtils.LogCapturer logs;
 
   @Before
   public void setUp() throws Exception {
@@ -78,7 +76,7 @@
     userGroupInfo = UserGroupInformation.createUserForTesting("bob",
         new String[] {"hadoop"});
 
-    logs = LogCapturer.captureLogs(FSNamesystem.LOG);
+    logs = GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.LOG);
     GenericTestUtils
         .setLogLevel(LoggerFactory.getLogger(FSNamesystem.class.getName()),
             org.slf4j.event.Level.INFO);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 96650a4d5ee..a312b03168b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -115,7 +115,7 @@ import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.After;
 import org.junit.AfterClass;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
index 651d4f31c9d..464fdfcd6c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
@@ -18,13 +18,15 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.util.function.Supplier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.test.GenericTestUtils;
org.apache.log4j.Appender; +import org.apache.log4j.AsyncAppender; import org.junit.Rule; import org.junit.Test; @@ -32,6 +34,7 @@ import org.junit.rules.Timeout; import java.io.IOException; import java.util.Collections; +import java.util.List; import java.util.concurrent.TimeoutException; import static org.apache.hadoop.hdfs.DFSConfigKeys.*; @@ -61,12 +64,12 @@ public class TestNameNodeMetricsLogger { } @Test - @SuppressWarnings("unchecked") public void testMetricsLoggerIsAsync() throws IOException { makeNameNode(true); org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(NameNode.METRICS_LOG_NAME); - assertTrue(Collections.list(logger.getAllAppenders()).get(0) - instanceof org.apache.log4j.AsyncAppender); + @SuppressWarnings("unchecked") + List<Appender> appenders = Collections.list(logger.getAllAppenders()); + assertTrue(appenders.get(0) instanceof AsyncAppender); } /** @@ -77,14 +80,20 @@ public class TestNameNodeMetricsLogger { public void testMetricsLogOutput() throws IOException, InterruptedException, TimeoutException { TestFakeMetric metricsProvider = new TestFakeMetric(); - MBeans.register(this.getClass().getSimpleName(), "DummyMetrics", metricsProvider); + MBeans.register(this.getClass().getSimpleName(), + "DummyMetrics", metricsProvider); makeNameNode(true); // Log metrics early and often. - LogCapturer logCapturer = - LogCapturer.captureLogs(LoggerFactory.getLogger(NameNode.METRICS_LOG_NAME)); + final PatternMatchingAppender appender = + (PatternMatchingAppender) org.apache.log4j.Logger.getLogger(NameNode.METRICS_LOG_NAME) + .getAppender("PATTERNMATCHERAPPENDER"); - GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains("FakeMetric"), - 1000, 60000); - logCapturer.stopCapturing(); + // Ensure that the supplied pattern was matched.
+ GenericTestUtils.waitFor(new Supplier<Boolean>() { + @Override + public Boolean get() { + return appender.isMatched(); + } + }, 1000, 60000); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java index 8750154077f..073ee377819 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java @@ -28,8 +28,7 @@ import java.util.Collection; import org.junit.Test; import org.slf4j.LoggerFactory; - -import org.apache.hadoop.logging.LogCapturer; +import org.apache.hadoop.test.GenericTestUtils.LogCapturer; public class TestNameNodeResourcePolicy { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java index 7ea0b24f2be..67c8f3c18f1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java @@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.LogVerificationAppender; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.StripedFileTestUtil; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -68,12 +69,12 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.util.HostsFileWriter; import org.apache.hadoop.hdfs.util.MD5FileUtils; import org.apache.hadoop.io.MD5Hash; -import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.ExitUtil.ExitException; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.StringUtils; +import org.apache.log4j.Logger; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -523,8 +524,10 @@ public class TestStartup { // Corrupt the md5 files in all the namedirs corruptFSImageMD5(true); - // Attach our own log appender so we can verify output - LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); + // Attach our own log appender so we can verify output + final LogVerificationAppender appender = new LogVerificationAppender(); + final Logger logger = Logger.getRootLogger(); + logger.addAppender(appender); // Try to start a new cluster LOG.info("\n===========================================\n" + @@ -538,13 +541,10 @@ } catch (IOException ioe) { GenericTestUtils.assertExceptionContains( "Failed to load FSImage file", ioe); - - int md5failures = - org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(), - " is corrupt with MD5 checksum of "); + int md5failures = appender.countExceptionsWithMessage( + " is corrupt with MD5 checksum of "); // Two namedirs, so should have seen two failures assertEquals(2, md5failures); - logCapturer.stopCapturing(); } } finally { if (cluster != null) { diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java index 7376237a4c1..0e83bec11f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java @@ -43,7 +43,7 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorage; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.logging.LogCapturer; +import org.apache.hadoop.test.GenericTestUtils.LogCapturer; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -197,7 +197,7 @@ public class TestBootstrapStandby { // Trying to bootstrap standby should now fail since the edit // logs aren't available in the shared dir. - LogCapturer logs = LogCapturer.captureLogs( + LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs( LoggerFactory.getLogger(BootstrapStandby.class)); try { assertEquals(BootstrapStandby.ERR_CODE_LOGS_UNAVAILABLE, forceBootstrap(1)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java index 6fa979d039a..168273117b5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java @@ -44,7 +44,6 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.Whitebox; import org.junit.After; import org.junit.Before; @@ -144,7 +143,7 @@ public class TestDelegationTokensWithHA { () -> (DistributedFileSystem) FileSystem.get(conf)); GenericTestUtils.setLogLevel(ObserverReadProxyProvider.LOG, Level.DEBUG); - LogCapturer logCapture = LogCapturer + GenericTestUtils.LogCapturer logCapture = GenericTestUtils.LogCapturer .captureLogs(ObserverReadProxyProvider.LOG); try { dfs.access(new Path("/"), FsAction.READ); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java index 3dbadcaaf08..513f60cb1ed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java @@ -37,6 +37,7 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.LogVerificationAppender; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.server.common.Util; @@ -47,12 
+48,12 @@ import org.apache.hadoop.io.compress.CompressionCodecFactory; import org.apache.hadoop.io.compress.CompressionOutputStream; import org.apache.hadoop.io.compress.GzipCodec; import org.apache.hadoop.ipc.StandbyException; -import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.ThreadUtil; +import org.apache.log4j.spi.LoggingEvent; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -298,38 +299,39 @@ public class TestStandbyCheckpoints { @Test(timeout = 30000) public void testCheckpointBeforeNameNodeInitializationIsComplete() throws Exception { - LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); + final LogVerificationAppender appender = new LogVerificationAppender(); + final org.apache.log4j.Logger logger = org.apache.log4j.Logger + .getRootLogger(); + logger.addAppender(appender); - try { - // Transition 2 to observer - cluster.transitionToObserver(2); - doEdits(0, 10); - // After a rollEditLog, Standby(nn1)'s next checkpoint would be - // ahead of observer(nn2). - nns[0].getRpcServer().rollEditLog(); + // Transition 2 to observer + cluster.transitionToObserver(2); + doEdits(0, 10); + // After a rollEditLog, Standby(nn1)'s next checkpoint would be + // ahead of observer(nn2). + nns[0].getRpcServer().rollEditLog(); - NameNode nn2 = nns[2]; - FSImage nnFSImage = NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, null); + NameNode nn2 = nns[2]; + FSImage nnFSImage = NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, null); - // After standby creating a checkpoint, it will try to push the image to - // active and all observer, updating it's own txid to the most recent. - HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12)); - HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12)); + // After standby creating a checkpoint, it will try to push the image to + // active and all observer, updating it's own txid to the most recent. + HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12)); + HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12)); - NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, nnFSImage); - cluster.transitionToStandby(2); + NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, nnFSImage); + cluster.transitionToStandby(2); + logger.removeAppender(appender); - for (String logLine : logCapturer.getOutput().split("\n")) { - if (logLine != null && logLine.contains("PutImage failed") && logLine.contains( - "FSImage has not been set in the NameNode.")) { - //Logs have the expected exception. - return; - } + for (LoggingEvent event : appender.getLog()) { + String message = event.getRenderedMessage(); + if (message.contains("PutImage failed") && + message.contains("FSImage has not been set in the NameNode.")) { + //Logs have the expected exception. 
+ return; } - fail("Expected exception not present in logs."); - } finally { - logCapturer.stopCapturing(); } + fail("Expected exception not present in logs."); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java index 3741bbf015f..58d72f14d73 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java @@ -93,7 +93,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.util.KerberosName; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.logging.LogCapturer; +import org.apache.hadoop.test.GenericTestUtils.LogCapturer; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.ExitUtil; import org.junit.After; @@ -1372,7 +1372,7 @@ public class TestExternalStoragePolicySatisfier { Path filePath = new Path("/zeroSizeFile"); DFSTestUtil.createFile(fs, filePath, 1024, (short) 5, 0); fs.setReplication(filePath, (short) 3); - LogCapturer logs = LogCapturer.captureLogs( + LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs( LoggerFactory.getLogger(BlockStorageMovementAttemptedItems.class)); fs.setStoragePolicy(filePath, "COLD"); fs.satisfyStoragePolicy(filePath); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties index b739b25f352..368deef4020 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties @@ -22,6 +22,9 @@ log4j.appender.stdout=org.apache.log4j.ConsoleAppender log4j.appender.stdout.layout=org.apache.log4j.PatternLayout log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n +# Only to be used for testing +log4j.appender.PATTERNMATCHERAPPENDER=org.apache.hadoop.hdfs.server.namenode.PatternMatchingAppender + # # NameNode metrics logging. # The default is to retain two namenode-metrics.log files up to 64MB each. 
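The PatternMatchingAppender registered above is referenced by name ("PATTERNMATCHERAPPENDER") from TestNameNodeMetricsLogger, but its source is not part of this patch. A minimal sketch of such an appender, assuming it only needs to record whether any rendered message matched a fixed pattern (the "FakeMetric" pattern is an assumption taken from the waitFor call in the test above):

// Hypothetical sketch; the real class in the HDFS test tree may differ.
package org.apache.hadoop.hdfs.server.namenode;

import java.util.regex.Pattern;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.spi.LoggingEvent;

public class PatternMatchingAppender extends AppenderSkeleton {
  // Assumed pattern; TestNameNodeMetricsLogger waits for "FakeMetric" output.
  private final Pattern pattern = Pattern.compile(".*FakeMetric.*");
  private volatile boolean matched = false;

  @Override
  protected void append(LoggingEvent event) {
    // Remember whether any message rendered so far matched the pattern.
    if (pattern.matcher(event.getRenderedMessage()).matches()) {
      matched = true;
    }
  }

  public boolean isMatched() {
    return matched;
  }

  @Override
  public void close() {
  }

  @Override
  public boolean requiresLayout() {
    return false;
  }
}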
@@ -29,10 +32,10 @@ log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:% # TODO : While migrating to log4j2, replace AsyncRFAAppender with AsyncAppender as # log4j2 properties support wrapping of other appenders to AsyncAppender using appender ref -namenode.metrics.logger=INFO,ASYNCNNMETRICSRFA +namenode.metrics.logger=INFO,ASYNCNNMETRICSRFA,PATTERNMATCHERAPPENDER log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger} log4j.additivity.NameNodeMetricsLog=false -log4j.appender.ASYNCNNMETRICSRFA=org.apache.hadoop.logging.appenders.AsyncRFAAppender +log4j.appender.ASYNCNNMETRICSRFA=org.apache.hadoop.hdfs.util.AsyncRFAAppender log4j.appender.ASYNCNNMETRICSRFA.conversionPattern=%d{ISO8601} %m%n log4j.appender.ASYNCNNMETRICSRFA.maxFileSize=64MB log4j.appender.ASYNCNNMETRICSRFA.fileName=${hadoop.log.dir}/namenode-metrics.log @@ -45,10 +48,10 @@ log4j.appender.ASYNCNNMETRICSRFA.maxBackupIndex=1 # TODO : While migrating to log4j2, replace AsyncRFAAppender with AsyncAppender as # log4j2 properties support wrapping of other appenders to AsyncAppender using appender ref -datanode.metrics.logger=INFO,ASYNCDNMETRICSRFA +datanode.metrics.logger=INFO,ASYNCDNMETRICSRFA,PATTERNMATCHERAPPENDER log4j.logger.DataNodeMetricsLog=${datanode.metrics.logger} log4j.additivity.DataNodeMetricsLog=false -log4j.appender.ASYNCDNMETRICSRFA=org.apache.hadoop.logging.appenders.AsyncRFAAppender +log4j.appender.ASYNCDNMETRICSRFA=org.apache.hadoop.hdfs.util.AsyncRFAAppender log4j.appender.ASYNCDNMETRICSRFA.conversionPattern=%d{ISO8601} %m%n log4j.appender.ASYNCDNMETRICSRFA.maxFileSize=64MB log4j.appender.ASYNCDNMETRICSRFA.fileName=${hadoop.log.dir}/datanode-metrics.log @@ -69,7 +72,7 @@ hdfs.audit.log.maxfilesize=256MB hdfs.audit.log.maxbackupindex=20 log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger} log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false -log4j.appender.ASYNCAUDITAPPENDER=org.apache.hadoop.logging.appenders.AsyncRFAAppender +log4j.appender.ASYNCAUDITAPPENDER=org.apache.hadoop.hdfs.util.AsyncRFAAppender log4j.appender.ASYNCAUDITAPPENDER.blocking=false log4j.appender.ASYNCAUDITAPPENDER.bufferSize=256 log4j.appender.ASYNCAUDITAPPENDER.conversionPattern=%m%n diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml index 142c1ab31d1..e3b3511c0ce 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml @@ -124,12 +124,6 @@ assertj-core test - - org.apache.hadoop - hadoop-logging - test - test-jar - diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java index cb5f3edd054..15682eeefc6 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java @@ -36,10 +36,9 @@ import java.util.HashMap; import java.util.Iterator; import 
java.util.List; import java.util.Map; +import java.util.concurrent.CopyOnWriteArrayList; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; - -import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptFailEvent; import org.apache.hadoop.yarn.util.resource.CustomResourceTypesConfigurationProvider; import org.junit.After; @@ -108,10 +107,12 @@ import org.apache.hadoop.yarn.util.Clock; import org.apache.hadoop.yarn.util.ControlledClock; import org.apache.hadoop.yarn.util.SystemClock; import org.apache.hadoop.yarn.util.resource.ResourceUtils; +import org.apache.log4j.AppenderSkeleton; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.apache.log4j.spi.LoggingEvent; import org.junit.Test; import org.mockito.ArgumentCaptor; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; @@ -127,6 +128,29 @@ public class TestTaskAttempt{ } } + private static class TestAppender extends AppenderSkeleton { + + private final List<LoggingEvent> logEvents = new CopyOnWriteArrayList<>(); + + @Override + public boolean requiresLayout() { + return false; + } + + @Override + public void close() { + } + + @Override + protected void append(LoggingEvent arg0) { + logEvents.add(arg0); + } + + private List<LoggingEvent> getLogEvents() { + return logEvents; + } + } + @BeforeClass public static void setupBeforeClass() { ResourceUtils.resetResourceTypes(new Configuration()); @@ -1700,10 +1724,11 @@ public class TestTaskAttempt{ for (String memoryName : ImmutableList.of( MRJobConfig.RESOURCE_TYPE_NAME_MEMORY, MRJobConfig.RESOURCE_TYPE_ALTERNATIVE_NAME_MEMORY)) { - final Logger logger = LoggerFactory.getLogger(TaskAttemptImpl.class); - LogCapturer logCapturer = LogCapturer.captureLogs(logger); + TestAppender testAppender = new TestAppender(); + final Logger logger = Logger.getLogger(TaskAttemptImpl.class); try { TaskAttemptImpl.RESOURCE_REQUEST_CACHE.clear(); + logger.addAppender(testAppender); EventHandler eventHandler = mock(EventHandler.class); Clock clock = SystemClock.getInstance(); JobConf jobConf = new JobConf(); @@ -1716,11 +1741,13 @@ public class TestTaskAttempt{ getResourceInfoFromContainerRequest(taImpl, eventHandler). getMemorySize(); assertEquals(3072, memorySize); - assertTrue(logCapturer.getOutput().contains( - "Configuration " + "mapreduce.reduce.resource." + memoryName + "=3Gi is " - + "overriding the mapreduce.reduce.memory.mb=2048 configuration")); + assertTrue(testAppender.getLogEvents().stream() + .anyMatch(e -> e.getLevel() == Level.WARN && ("Configuration " + + "mapreduce.reduce.resource." + memoryName + "=3Gi is " + + "overriding the mapreduce.reduce.memory.mb=2048 configuration") + .equals(e.getMessage()))); } finally { - logCapturer.stopCapturing(); + logger.removeAppender(testAppender); } } } @@ -1782,9 +1809,10 @@ public class TestTaskAttempt{ @Test public void testReducerCpuRequestOverriding() { - final Logger logger = LoggerFactory.getLogger(TaskAttemptImpl.class); - final LogCapturer logCapturer = LogCapturer.captureLogs(logger); + TestAppender testAppender = new TestAppender(); + final Logger logger = Logger.getLogger(TaskAttemptImpl.class); try { + logger.addAppender(testAppender); EventHandler eventHandler = mock(EventHandler.class); Clock clock = SystemClock.getInstance(); JobConf jobConf = new JobConf(); @@ -1797,11 +1825,13 @@ public class TestTaskAttempt{ getResourceInfoFromContainerRequest(taImpl, eventHandler).
getVirtualCores(); assertEquals(7, vCores); - assertTrue(logCapturer.getOutput().contains( - "Configuration " + "mapreduce.reduce.resource.vcores=7 is overriding the " - + "mapreduce.reduce.cpu.vcores=9 configuration")); + assertTrue(testAppender.getLogEvents().stream().anyMatch( + e -> e.getLevel() == Level.WARN && ("Configuration " + + "mapreduce.reduce.resource.vcores=7 is overriding the " + + "mapreduce.reduce.cpu.vcores=9 configuration").equals( + e.getMessage()))); } finally { - logCapturer.stopCapturing(); + logger.removeAppender(testAppender); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml index d124c97e9da..7530428d752 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml @@ -72,12 +72,6 @@ assertj-core test - - org.apache.hadoop - hadoop-logging - test - test-jar - diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java index 43ab1701601..a0223dedd64 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java @@ -23,10 +23,12 @@ import java.io.BufferedReader; import java.io.DataOutputStream; import java.io.File; import java.io.FileInputStream; +import java.io.Flushable; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.util.ArrayList; +import java.util.Enumeration; import java.util.List; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -42,13 +44,16 @@ import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.SecureIOUtils; -import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.mapreduce.JobID; import org.apache.hadoop.mapreduce.util.ProcessTree; import org.apache.hadoop.util.Shell; +import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.concurrent.HadoopExecutors; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.log4j.Appender; +import org.apache.log4j.LogManager; +import org.apache.log4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; @@ -271,7 +276,42 @@ public class TaskLog { } // flush & close all appenders - HadoopLoggerUtils.shutdownLogManager(); + LogManager.shutdown(); + } + + @SuppressWarnings("unchecked") + public static synchronized void syncLogs() { + // flush standard streams + // + System.out.flush(); + System.err.flush(); + + // flush flushable appenders + // + final Logger rootLogger = Logger.getRootLogger(); + flushAppenders(rootLogger); + final Enumeration<Logger> allLoggers = rootLogger.getLoggerRepository().
+ getCurrentLoggers(); + while (allLoggers.hasMoreElements()) { + final Logger l = allLoggers.nextElement(); + flushAppenders(l); + } + } + + @SuppressWarnings("unchecked") + private static void flushAppenders(Logger l) { + final Enumeration<Appender> allAppenders = l.getAllAppenders(); + while (allAppenders.hasMoreElements()) { + final Appender a = allAppenders.nextElement(); + if (a instanceof Flushable) { + try { + ((Flushable) a).flush(); + } catch (IOException ioe) { + System.err.println(a + ": Failed to flush!" + + StringUtils.stringifyException(ioe)); + } + } + } } public static ScheduledExecutorService createLogSyncer() { @@ -296,7 +336,7 @@ public class TaskLog { new Runnable() { @Override public void run() { - HadoopLoggerUtils.syncLogs(); + TaskLog.syncLogs(); } }, 0L, 5L, TimeUnit.SECONDS); return scheduler; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java index f83835f5383..e91b4c1e854 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java @@ -28,19 +28,24 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; +import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.io.LineNumberReader; +import java.io.StringReader; import org.junit.Before; import org.junit.Test; import static org.junit.Assert.*; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.mapred.TaskReport; import org.apache.hadoop.mapreduce.JobStatus.State; import org.apache.hadoop.mapreduce.protocol.ClientProtocol; +import org.apache.log4j.Layout; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.apache.log4j.WriterAppender; import org.mockito.stubbing.Answer; -import org.slf4j.LoggerFactory; /** * Test to make sure that command line output for @@ -68,53 +73,55 @@ public class TestJobMonitorAndPrint { @Test public void testJobMonitorAndPrint() throws Exception { - LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger(Job.class)); - try { - JobStatus jobStatus_1 = - new JobStatus(new JobID("job_000", 1), 1f, 0.1f, 0.1f, 0f, State.RUNNING, - JobPriority.HIGH, "tmp-user", "tmp-jobname", "tmp-queue", "tmp-jobfile", "tmp-url", - true); - JobStatus jobStatus_2 = - new JobStatus(new JobID("job_000", 1), 1f, 1f, 1f, 1f, State.SUCCEEDED, JobPriority.HIGH, - "tmp-user", "tmp-jobname", "tmp-queue", "tmp-jobfile", "tmp-url", true); + JobStatus jobStatus_1 = new JobStatus(new JobID("job_000", 1), 1f, 0.1f, + 0.1f, 0f, State.RUNNING, JobPriority.HIGH, "tmp-user", "tmp-jobname", + "tmp-queue", "tmp-jobfile", "tmp-url", true); + JobStatus jobStatus_2 = new JobStatus(new JobID("job_000", 1), 1f, 1f, + 1f, 1f, State.SUCCEEDED, JobPriority.HIGH, "tmp-user", "tmp-jobname", + "tmp-queue", "tmp-jobfile", "tmp-url", true); - doAnswer((Answer<TaskCompletionEvent[]>) invocation -> TaskCompletionEvent.EMPTY_ARRAY).when( job).getTaskCompletionEvents(anyInt(), anyInt()); + doAnswer((Answer<TaskCompletionEvent[]>) invocation -> + TaskCompletionEvent.EMPTY_ARRAY).when(job) +
.getTaskCompletionEvents(anyInt(), anyInt()); - doReturn(new TaskReport[5]).when(job).getTaskReports(isA(TaskType.class)); - when(clientProtocol.getJobStatus(any(JobID.class))).thenReturn(jobStatus_1, jobStatus_2); + doReturn(new TaskReport[5]).when(job).getTaskReports(isA(TaskType.class)); + when(clientProtocol.getJobStatus(any(JobID.class))).thenReturn(jobStatus_1, jobStatus_2); + // setup the logger to capture all logs + Layout layout = + Logger.getRootLogger().getAppender("stdout").getLayout(); + ByteArrayOutputStream os = new ByteArrayOutputStream(); + WriterAppender appender = new WriterAppender(layout, os); + appender.setThreshold(Level.ALL); + Logger qlogger = Logger.getLogger(Job.class); + qlogger.addAppender(appender); - job.monitorAndPrintJob(); + job.monitorAndPrintJob(); - boolean foundHundred = false; - boolean foundComplete = false; - boolean foundUber = false; - String uberModeMatch = "uber mode : true"; - String progressMatch = "map 100% reduce 100%"; - String completionMatch = "completed successfully"; - for (String logLine : logCapturer.getOutput().split("\n")) { - if (logLine.contains(uberModeMatch)) { - foundUber = true; - } - if (logLine.contains(progressMatch)) { - foundHundred = true; - } - if (logLine.contains(completionMatch)) { - foundComplete = true; - } - if (foundUber && foundHundred && foundComplete) { - break; - } + qlogger.removeAppender(appender); + LineNumberReader r = new LineNumberReader(new StringReader(os.toString())); + String line; + boolean foundHundred = false; + boolean foundComplete = false; + boolean foundUber = false; + String uberModeMatch = "uber mode : true"; + String progressMatch = "map 100% reduce 100%"; + String completionMatch = "completed successfully"; + while ((line = r.readLine()) != null) { + if (line.contains(uberModeMatch)) { + foundUber = true; } - assertTrue(foundUber); - assertTrue(foundHundred); - assertTrue(foundComplete); - - System.out.println("The output of job.toString() is : \n" + job.toString()); - assertTrue(job.toString().contains("Number of maps: 5\n")); - assertTrue(job.toString().contains("Number of reduces: 5\n")); - } finally { - logCapturer.stopCapturing(); + foundHundred = line.contains(progressMatch); + if (foundHundred) + break; } + line = r.readLine(); + foundComplete = line.contains(completionMatch); + assertTrue(foundUber); + assertTrue(foundHundred); + assertTrue(foundComplete); + + System.out.println("The output of job.toString() is : \n" + job.toString()); + assertTrue(job.toString().contains("Number of maps: 5\n")); + assertTrue(job.toString().contains("Number of reduces: 5\n")); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml index 632e972d5ab..17358a37da3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml @@ -128,12 +128,6 @@ assertj-core test - - org.apache.hadoop - hadoop-logging - test - test-jar - diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java index 063f185d3d7..0bdc7212179 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java @@ -34,6 +34,7 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; @@ -44,6 +45,7 @@ import java.security.PrivilegedExceptionAction; import java.util.Arrays; import java.util.List; import java.util.Map; +import java.util.concurrent.CopyOnWriteArrayList; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.conf.Configuration; @@ -53,7 +55,6 @@ import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.Text; -import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.mapreduce.JobID; import org.apache.hadoop.mapreduce.JobPriority; import org.apache.hadoop.mapreduce.JobStatus.State; @@ -109,6 +110,13 @@ import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.resource.CustomResourceTypesConfigurationProvider; import org.apache.hadoop.yarn.util.resource.ResourceUtils; +import org.apache.log4j.Appender; +import org.apache.log4j.AppenderSkeleton; +import org.apache.log4j.Layout; +import org.apache.log4j.Level; +import org.apache.log4j.SimpleLayout; +import org.apache.log4j.WriterAppender; +import org.apache.log4j.spi.LoggingEvent; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -136,6 +144,29 @@ public class TestYARNRunner { MRJobConfig.DEFAULT_TASK_PROFILE_PARAMS.lastIndexOf("%")); private static final String CUSTOM_RESOURCE_NAME = "a-custom-resource"; + private static class TestAppender extends AppenderSkeleton { + + private final List<LoggingEvent> logEvents = new CopyOnWriteArrayList<>(); + + @Override + public boolean requiresLayout() { + return false; + } + + @Override + public void close() { + } + + @Override + protected void append(LoggingEvent arg0) { + logEvents.add(arg0); + } + + private List<LoggingEvent> getLogEvents() { + return logEvents; + } + } + private YARNRunner yarnRunner; private ResourceMgrDelegate resourceMgrDelegate; private YarnConfiguration conf; @@ -518,48 +549,38 @@ public class TestYARNRunner { assertTrue("AM admin command opts is after user command opts.", adminIndex < userIndex); } } - @Test(timeout=20000) public void testWarnCommandOpts() throws Exception { - LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger(YARNRunner.class)); - try { - JobConf jobConf = new JobConf(); - - jobConf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS, - "-Djava.net.preferIPv4Stack=true -Djava.library.path=foo"); - jobConf.set(MRJobConfig.MR_AM_COMMAND_OPTS, "-Xmx1024m -Djava.library.path=bar"); - - YARNRunner yarnRunner = new YARNRunner(jobConf); - - @SuppressWarnings("unused") - ApplicationSubmissionContext submissionContext = buildSubmitContext(yarnRunner, jobConf); - - boolean isFoundOne = false; - boolean isFoundTwo = false; - for (String logLine : logCapturer.getOutput().split("\n")) { - if (logLine == null) { - continue; - } - if (logLine.contains("WARN") && logLine.contains("Usage of -Djava.library.path in " - + "yarn.app.mapreduce.am.admin-command-opts can cause
programs to no " - + "longer function if hadoop native libraries are used. These values " - + "should be set as part of the LD_LIBRARY_PATH in the app master JVM " - + "env using yarn.app.mapreduce.am.admin.user.env config settings.")) { - isFoundOne = true; - } - if (logLine.contains("WARN") && logLine.contains("Usage of -Djava.library.path in " - + "yarn.app.mapreduce.am.command-opts can cause programs to no longer " - + "function if hadoop native libraries are used. These values should " - + "be set as part of the LD_LIBRARY_PATH in the app master JVM env " - + "using yarn.app.mapreduce.am.env config settings.")) { - isFoundTwo = true; - } - } - assertTrue(isFoundOne); - assertTrue(isFoundTwo); - } finally { - logCapturer.stopCapturing(); - } + org.apache.log4j.Logger logger = + org.apache.log4j.Logger.getLogger(YARNRunner.class); + + ByteArrayOutputStream bout = new ByteArrayOutputStream(); + Layout layout = new SimpleLayout(); + Appender appender = new WriterAppender(layout, bout); + logger.addAppender(appender); + + JobConf jobConf = new JobConf(); + + jobConf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS, "-Djava.net.preferIPv4Stack=true -Djava.library.path=foo"); + jobConf.set(MRJobConfig.MR_AM_COMMAND_OPTS, "-Xmx1024m -Djava.library.path=bar"); + + YARNRunner yarnRunner = new YARNRunner(jobConf); + + @SuppressWarnings("unused") + ApplicationSubmissionContext submissionContext = + buildSubmitContext(yarnRunner, jobConf); + + String logMsg = bout.toString(); + assertTrue(logMsg.contains("WARN - Usage of -Djava.library.path in " + + "yarn.app.mapreduce.am.admin-command-opts can cause programs to no " + + "longer function if hadoop native libraries are used. These values " + + "should be set as part of the LD_LIBRARY_PATH in the app master JVM " + + "env using yarn.app.mapreduce.am.admin.user.env config settings.")); + assertTrue(logMsg.contains("WARN - Usage of -Djava.library.path in " + + "yarn.app.mapreduce.am.command-opts can cause programs to no longer " + + "function if hadoop native libraries are used. These values should " + + "be set as part of the LD_LIBRARY_PATH in the app master JVM env " + + "using yarn.app.mapreduce.am.env config settings.")); } @Test(timeout=20000) @@ -975,7 +996,10 @@ public class TestYARNRunner { for (String memoryName : ImmutableList.of( MRJobConfig.RESOURCE_TYPE_NAME_MEMORY, MRJobConfig.RESOURCE_TYPE_ALTERNATIVE_NAME_MEMORY)) { - LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger(YARNRunner.class)); + TestAppender testAppender = new TestAppender(); + org.apache.log4j.Logger logger = + org.apache.log4j.Logger.getLogger(YARNRunner.class); + logger.addAppender(testAppender); try { JobConf jobConf = new JobConf(); jobConf.set(MRJobConfig.MR_AM_RESOURCE_PREFIX + memoryName, "3 Gi"); @@ -993,17 +1017,13 @@ public class TestYARNRunner { long memorySize = resourceRequest.getCapability().getMemorySize(); Assert.assertEquals(3072, memorySize); - boolean isLogFound = false; - for (String logLine : logCapturer.getOutput().split("\n")) { - if (logLine != null && logLine.contains("WARN") && logLine.contains( - "Configuration " + "yarn.app.mapreduce.am.resource." + memoryName + "=3Gi is " - + "overriding the yarn.app.mapreduce.am.resource.mb=2048 " + "configuration")) { - isLogFound = true; - } - } - assertTrue("Log line could not be found", isLogFound); + assertTrue(testAppender.getLogEvents().stream().anyMatch( + e -> e.getLevel() == Level.WARN && ("Configuration " + + "yarn.app.mapreduce.am.resource." 
+ memoryName + "=3Gi is " + + "overriding the yarn.app.mapreduce.am.resource.mb=2048 " + + "configuration").equals(e.getMessage()))); } finally { - logCapturer.stopCapturing(); + logger.removeAppender(testAppender); } } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestChild.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestChild.java index cc93e5629d1..338f1172b04 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestChild.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestChild.java @@ -29,6 +29,8 @@ import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapred.HadoopTestCase; import org.apache.hadoop.mapred.JobConf; +import org.apache.log4j.Level; +import org.junit.Before; import org.junit.Test; import static org.junit.Assert.assertTrue; @@ -74,10 +76,12 @@ public class TestChild extends HadoopTestCase { mapJavaOpts, mapJavaOpts, MAP_OPTS_VAL); } - - String logLevel = conf.get(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, "INFO"); - assertEquals(JobConf.MAPRED_MAP_TASK_LOG_LEVEL + "has value of " + logLevel, logLevel, - "OFF"); + + Level logLevel = + Level.toLevel(conf.get(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, + Level.INFO.toString())); + assertEquals(JobConf.MAPRED_MAP_TASK_LOG_LEVEL + "has value of " + + logLevel, logLevel, Level.OFF); } } @@ -104,10 +108,12 @@ public class TestChild extends HadoopTestCase { reduceJavaOpts, reduceJavaOpts, REDUCE_OPTS_VAL); } - - String logLevel = conf.get(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, "INFO"); - assertEquals(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL + "has value of " + logLevel, logLevel, - "OFF"); + + Level logLevel = + Level.toLevel(conf.get(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, + Level.INFO.toString())); + assertEquals(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL + "has value of " + + logLevel, logLevel, Level.OFF); } } @@ -121,9 +127,9 @@ public class TestChild extends HadoopTestCase { conf.set(JobConf.MAPRED_MAP_TASK_JAVA_OPTS, MAP_OPTS_VAL); conf.set(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, REDUCE_OPTS_VAL); } - - conf.set(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, "OFF"); - conf.set(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, "OFF"); + + conf.set(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, Level.OFF.toString()); + conf.set(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, Level.OFF.toString()); Job job = MapReduceTestUtil.createJob(conf, inDir, outDir, numMaps, numReds); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java index d1fc8c04aa1..9e58d460d17 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java @@ -25,7 +25,6 @@ import java.net.InetSocketAddress; import java.security.PrivilegedAction; import java.security.PrivilegedExceptionAction; -import 
org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.test.LambdaTestUtils; import org.junit.Assert; @@ -51,6 +50,8 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.Records; +import org.apache.log4j.Level; +import org.apache.log4j.LogManager; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -63,7 +64,8 @@ public class TestJHSSecurity { @Test public void testDelegationToken() throws Exception { - HadoopLoggerUtils.setLogLevel("root", "DEBUG"); + org.apache.log4j.Logger rootLogger = LogManager.getRootLogger(); + rootLogger.setLevel(Level.DEBUG); final YarnConfiguration conf = new YarnConfiguration(new JobConf()); // Just a random principle diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java index f653ce7c0cd..43d3abe4f8d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java @@ -99,6 +99,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Capacity import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.WorkflowPriorityMappingsManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.WorkflowPriorityMappingsManager.WorkflowPriorityMapping; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler; +import org.apache.log4j.Level; import org.junit.After; import org.junit.AfterClass; import org.junit.Assert; @@ -556,9 +557,9 @@ public class TestMRJobs { systemClasses); } sleepConf.set(MRJobConfig.IO_SORT_MB, TEST_IO_SORT_MB); - sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, "ALL"); - sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, "ALL"); - sleepConf.set(MRJobConfig.REDUCE_LOG_LEVEL, "ALL"); + sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, Level.ALL.toString()); + sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString()); + sleepConf.set(MRJobConfig.REDUCE_LOG_LEVEL, Level.ALL.toString()); sleepConf.set(MRJobConfig.MAP_JAVA_OPTS, "-verbose:class"); final SleepJob sleepJob = new SleepJob(); sleepJob.setConf(sleepConf); @@ -855,11 +856,11 @@ public class TestMRJobs { final SleepJob sleepJob = new SleepJob(); final JobConf sleepConf = new JobConf(mrCluster.getConfig()); - sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, "ALL"); + sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString()); final long userLogKb = 4; sleepConf.setLong(MRJobConfig.TASK_USERLOG_LIMIT, userLogKb); sleepConf.setInt(MRJobConfig.TASK_LOG_BACKUPS, 3); - sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, "ALL"); + sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, Level.ALL.toString()); final long amLogKb = 7; sleepConf.setLong(MRJobConfig.MR_AM_LOG_KB, amLogKb); sleepConf.setInt(MRJobConfig.MR_AM_LOG_BACKUPS, 7); diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 3ebab5a30b8..c4dfd2f9d7c 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -1944,18 +1944,6 @@ log4j-web ${log4j2.version} - - 
org.apache.hadoop - hadoop-logging - ${hadoop.version} - - - org.apache.hadoop - hadoop-logging - ${hadoop.version} - test - test-jar - diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml index 373b5a07df1..e8c5fb78efd 100644 --- a/hadoop-tools/hadoop-azure/pom.xml +++ b/hadoop-tools/hadoop-azure/pom.xml @@ -349,12 +349,7 @@ hamcrest-library test - - org.apache.hadoop - hadoop-logging - test - test-jar - + diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java index 2a124c1c99d..1e7330fbd0b 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java @@ -32,7 +32,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.azure.NativeAzureFileSystem.FolderRenamePending; -import org.apache.hadoop.logging.LogCapturer; +import org.apache.hadoop.test.GenericTestUtils.LogCapturer; import org.junit.Before; import org.junit.Rule; import org.junit.Test; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java index 6acab8fe2a0..476d7a4f01e 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java @@ -23,7 +23,7 @@ import java.util.StringTokenizer; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.logging.LogCapturer; +import org.apache.hadoop.test.GenericTestUtils.LogCapturer; import org.junit.Test; import org.slf4j.Logger; diff --git a/hadoop-tools/hadoop-distcp/pom.xml b/hadoop-tools/hadoop-distcp/pom.xml index 06c2e192f08..5194e51d81f 100644 --- a/hadoop-tools/hadoop-distcp/pom.xml +++ b/hadoop-tools/hadoop-distcp/pom.xml @@ -81,12 +81,6 @@ hadoop-hdfs-client provided - - org.apache.hadoop - hadoop-logging - test - test-jar - org.apache.hadoop hadoop-hdfs diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java index d54fbaa86f2..aa42cb968d6 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java @@ -41,7 +41,6 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.tools.CopyListingFileStatus; import org.apache.hadoop.tools.DistCp; import org.apache.hadoop.tools.DistCpConstants; @@ -702,8 +701,8 @@ public abstract class AbstractContractDistCpTest GenericTestUtils .createFiles(remoteFS, source, getDepth(), getWidth(), getWidth()); - LogCapturer log = - 
LogCapturer.captureLogs(SimpleCopyListing.LOG); + GenericTestUtils.LogCapturer log = + GenericTestUtils.LogCapturer.captureLogs(SimpleCopyListing.LOG); String options = "-useiterator -update -delete" + getDefaultCLIOptions(); DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(), diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java index 661573f9d85..02fd48a071b 100644 --- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java +++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java @@ -27,10 +27,11 @@ import java.util.regex.Pattern; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.tools.rumen.datatypes.*; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; /** * A default parser for MapReduce job configuration properties. @@ -82,7 +83,7 @@ public class MapReduceJobPropertiesParser implements JobPropertyParser { // turn off the warning w.r.t deprecated mapreduce keys static { - HadoopLoggerUtils.setLogLevel(Configuration.class.getName(), "OFF"); + Logger.getLogger(Configuration.class).setLevel(Level.OFF); } // Accepts a key if there is a corresponding key in the current mapreduce diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml index d901513f2c1..81e888472d8 100644 --- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml +++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml @@ -576,6 +576,16 @@ + + + + + + + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java index b41923ef9de..a15c78e4267 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java @@ -63,7 +63,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; @@ -127,6 +126,7 @@ import org.apache.hadoop.yarn.util.SystemClock; import org.apache.hadoop.yarn.util.TimelineServiceHelper; import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.apache.hadoop.yarn.util.timeline.TimelineUtils; +import org.apache.log4j.LogManager; import 
org.apache.hadoop.classification.VisibleForTesting; import com.sun.jersey.api.client.ClientHandlerException; @@ -403,7 +403,7 @@ public class ApplicationMaster { result = appMaster.finish(); } catch (Throwable t) { LOG.error("Error running ApplicationMaster", t); - HadoopLoggerUtils.shutdownLogManager(); + LogManager.shutdown(); ExitUtil.terminate(1, t); } finally { if (appMaster != null) { @@ -529,7 +529,7 @@ public class ApplicationMaster { //Check whether customer log4j.properties file exists if (fileExist(log4jPath)) { try { - HadoopLoggerUtils.updateLog4jConfiguration(ApplicationMaster.class, + Log4jPropertyHelper.updateLog4jConfiguration(ApplicationMaster.class, log4jPath); } catch (Exception e) { LOG.warn("Can not set up custom log4j properties. " + e); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java index dc23682f1a3..098f3981cfd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java @@ -52,7 +52,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.DataOutputBuffer; -import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; @@ -452,7 +451,7 @@ public class Client { if (cliParser.hasOption("log_properties")) { String log4jPath = cliParser.getOptionValue("log_properties"); try { - HadoopLoggerUtils.updateLog4jConfiguration(Client.class, log4jPath); + Log4jPropertyHelper.updateLog4jConfiguration(Client.class, log4jPath); } catch (Exception e) { LOG.warn("Can not set up custom log4j properties. " + e); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Log4jPropertyHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Log4jPropertyHelper.java new file mode 100644 index 00000000000..0301a6880f8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Log4jPropertyHelper.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.applications.distributedshell; + +import java.io.FileInputStream; +import java.io.InputStream; +import java.util.Map.Entry; +import java.util.Properties; + +import org.apache.log4j.LogManager; +import org.apache.log4j.PropertyConfigurator; + +public class Log4jPropertyHelper { + + public static void updateLog4jConfiguration(Class<?> targetClass, + String log4jPath) throws Exception { + Properties customProperties = new Properties(); + try ( + FileInputStream fs = new FileInputStream(log4jPath); + InputStream is = targetClass.getResourceAsStream("/log4j.properties")) { + customProperties.load(fs); + Properties originalProperties = new Properties(); + originalProperties.load(is); + for (Entry<Object, Object> entry : customProperties.entrySet()) { + originalProperties.setProperty(entry.getKey().toString(), entry + .getValue().toString()); + } + LogManager.resetConfiguration(); + PropertyConfigurator.configure(originalProperties); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java index 553465313d2..60c06e9aa75 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java @@ -43,6 +43,7 @@ import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.client.util.YarnClientUtils; +import org.apache.log4j.Logger; import org.eclipse.jetty.server.Server; import org.eclipse.jetty.server.ServerConnector; import org.eclipse.jetty.servlet.ServletContextHandler; @@ -51,8 +52,6 @@ import org.eclipse.jetty.util.thread.QueuedThreadPool; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * Test Spnego Client Login.
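For orientation, the Log4jPropertyHelper added above is invoked from the distributed shell Client and ApplicationMaster call sites shown earlier in this patch. A caller-side sketch, where the properties path is a hypothetical example value:

// Caller-side sketch; "/tmp/custom-log4j.properties" is a placeholder path.
try {
  Log4jPropertyHelper.updateLog4jConfiguration(Client.class,
      "/tmp/custom-log4j.properties");
} catch (Exception e) {
  // Mirrors the call sites above: failure to load custom properties is
  // reported as a warning rather than failing the application.
  System.err.println("Can not set up custom log4j properties. " + e);
}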
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java
index 553465313d2..60c06e9aa75 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.client.util.YarnClientUtils;
+import org.apache.log4j.Logger;
 import org.eclipse.jetty.server.Server;
 import org.eclipse.jetty.server.ServerConnector;
 import org.eclipse.jetty.servlet.ServletContextHandler;
@@ -51,8 +52,6 @@ import org.eclipse.jetty.util.thread.QueuedThreadPool;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Test Spnego Client Login.
@@ -77,7 +76,8 @@ public class TestSecureApiServiceClient extends KerberosSecurityTestcase {
   private Map<String, String> props;
   private static Server server;
-  private static Logger LOG = LoggerFactory.getLogger(TestSecureApiServiceClient.class);
+  private static Logger LOG = Logger
+      .getLogger(TestSecureApiServiceClient.class);
   private ApiServiceClient asc;
 
   /**
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponent.java
index 52ae87671a2..f8f948dd88f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponent.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.yarn.service.api.records.ServiceState;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEvent;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType;
+import org.apache.log4j.Logger;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
@@ -48,6 +49,8 @@ import static org.apache.hadoop.yarn.service.conf.YarnServiceConstants
  */
 public class TestComponent {
 
+  static final Logger LOG = Logger.getLogger(TestComponent.class);
+
   @Rule
   public ServiceTestUtils.ServiceFSWatcher rule =
       new ServiceTestUtils.ServiceFSWatcher();
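The two test diffs above apply the same mechanical change as most of this patch: the slf4j LoggerFactory lookup becomes a log4j 1.x Logger.getLogger call. A sketch of the idiom with a hypothetical test class, including the one caveat worth remembering when porting call sites:

    import org.apache.log4j.Logger;

    public class SomeTest { // hypothetical
      static final Logger LOG = Logger.getLogger(SomeTest.class);

      void demo(int attempts) {
        // log4j 1.x does not expand slf4j-style {} placeholders, so
        // arguments must be concatenated into the message string.
        LOG.info("retrying, attempts=" + attempts);
      }
    }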
diff --git a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/Log4jWarningErrorMetricsAppender.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java
similarity index 93%
rename from hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/Log4jWarningErrorMetricsAppender.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java
index cf7a2bfe0d9..fffc8a857cb 100644
--- a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/Log4jWarningErrorMetricsAppender.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java
@@ -16,10 +16,12 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.logging.appenders;
+package org.apache.hadoop.yarn.util;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.log4j.AppenderSkeleton;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
@@ -111,13 +113,16 @@ public class Log4jWarningErrorMetricsAppender extends AppenderSkeleton {
   /**
    * Create an appender to keep track of the errors and warnings logged by the
    * system.
-   *
-   * @param cleanupIntervalSeconds the interval at which old messages are purged to prevent the
-   *                               message stores from growing unbounded.
-   * @param messageAgeLimitSeconds the maximum age of a message in seconds before it is purged from
-   *                               the store.
-   * @param maxUniqueMessages the maximum number of unique messages of each type we keep before
-   *                          we start purging.
+   * 
+   * @param cleanupIntervalSeconds
+   *          the interval at which old messages are purged to prevent the
+   *          message stores from growing unbounded
+   * @param messageAgeLimitSeconds
+   *          the maximum age of a message in seconds before it is purged from
+   *          the store
+   * @param maxUniqueMessages
+   *          the maximum number of unique messages of each type we keep before
+   *          we start purging
    */
   public Log4jWarningErrorMetricsAppender(int cleanupIntervalSeconds,
       long messageAgeLimitSeconds, int maxUniqueMessages) {
@@ -138,20 +143,6 @@ public class Log4jWarningErrorMetricsAppender extends AppenderSkeleton {
     this.setThreshold(Level.WARN);
   }
 
-  private static String join(CharSequence separator, String[] strings) {
-    StringBuilder sb = new StringBuilder();
-    boolean first = true;
-    for (String s : strings) {
-      if (first) {
-        first = false;
-      } else {
-        sb.append(separator);
-      }
-      sb.append(s);
-    }
-    return sb.toString();
-  }
-
   /**
    * {@inheritDoc}
    */
@@ -160,7 +151,7 @@ public class Log4jWarningErrorMetricsAppender extends AppenderSkeleton {
     String message = event.getRenderedMessage();
     String[] throwableStr = event.getThrowableStrRep();
     if (throwableStr != null) {
-      message = message + "\n" + join("\n", throwableStr);
+      message = message + "\n" + StringUtils.join("\n", throwableStr);
       message =
           org.apache.commons.lang3.StringUtils.left(message, MAX_MESSAGE_SIZE);
     }
@@ -241,7 +232,7 @@ public class Log4jWarningErrorMetricsAppender extends AppenderSkeleton {
    * getErrorMessagesAndCounts since the message store is purged at regular
    * intervals to prevent it from growing without bounds, while the store for
    * the counts is purged less frequently.
-   *
+   * 
    * @param cutoffs
    *          list of timestamp cutoffs(in seconds) for which the counts are
    *          desired
@@ -257,7 +248,7 @@ public class Log4jWarningErrorMetricsAppender extends AppenderSkeleton {
    * getWarningMessagesAndCounts since the message store is purged at regular
    * intervals to prevent it from growing without bounds, while the store for
    * the counts is purged less frequently.
-   *
+   * 
   * @param cutoffs
    *          list of timestamp cutoffs(in seconds) for which the counts are
    *          desired
@@ -294,7 +285,7 @@ public class Log4jWarningErrorMetricsAppender extends AppenderSkeleton {
    * differ from the ones provided by getErrorCounts since the message store is
    * purged at regular intervals to prevent it from growing without bounds,
    * while the store for the counts is purged less frequently.
-   *
+   * 
   * @param cutoffs
    *          list of timestamp cutoffs(in seconds) for which the counts are
    *          desired
@@ -313,7 +304,7 @@ public class Log4jWarningErrorMetricsAppender extends AppenderSkeleton {
    * may differ from the ones provided by getWarningCounts since the message
    * store is purged at regular intervals to prevent it from growing without
    * bounds, while the store for the counts is purged less frequently.
-   *
+   * 
    * @param cutoffs
    *          list of timestamp cutoffs(in seconds) for which the counts are
    *          desired
@@ -331,7 +322,7 @@ public class Log4jWarningErrorMetricsAppender extends AppenderSkeleton {
       SortedSet<PurgeElement> purgeInformation) {
     if (purgeInformation.size() > maxUniqueMessages) {
       ErrorAndWarningsCleanup cleanup = new ErrorAndWarningsCleanup();
-      long cutoff = System.currentTimeMillis() - (messageAgeLimitSeconds * 1000);
+      long cutoff = Time.now() - (messageAgeLimitSeconds * 1000);
       cutoff = (cutoff / 1000);
       cleanup.cleanupMessages(map, purgeInformation, cutoff, maxUniqueMessages);
     }
@@ -388,7 +379,7 @@ public class Log4jWarningErrorMetricsAppender extends AppenderSkeleton {
 
     @Override
     public void run() {
-      long cutoff = System.currentTimeMillis() - (messageAgeLimitSeconds * 1000);
+      long cutoff = Time.now() - (messageAgeLimitSeconds * 1000);
       cutoff = (cutoff / 1000);
       cleanupMessages(errors, errorsPurgeInformation, cutoff, maxUniqueMessages);
       cleanupMessages(warnings, warningsPurgeInformation, cutoff,
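With the rename above, the appender lives in org.apache.hadoop.yarn.util again. A sketch of wiring it up programmatically; the constructor arguments are illustrative, and the getWarningCounts signature is assumed from the javadoc above and from the web blocks that consume it:

    import java.util.Collections;
    import java.util.List;

    import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
    import org.apache.log4j.Logger;

    public final class AppenderSketch {
      public static void main(String[] args) {
        // cleanup interval, message age limit (seconds), max unique messages
        Log4jWarningErrorMetricsAppender appender =
            new Log4jWarningErrorMetricsAppender(300, 86400L, 250);
        Logger root = Logger.getRootLogger();
        root.addAppender(appender);

        root.warn("something worth counting");

        // A cutoff of 0 seconds asks for every warning captured so far.
        List<Integer> counts =
            appender.getWarningCounts(Collections.singletonList(0L));
        System.out.println(counts);
      }
    }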
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java
index 4fc87d95b6c..fa5a5870c4e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FilterFileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController;
@@ -41,6 +40,7 @@ import org.apache.hadoop.yarn.logaggregation.filecontroller.tfile.LogAggregation
 import org.apache.hadoop.yarn.logaggregation.testutils.LogAggregationTestcase;
 import org.apache.hadoop.yarn.logaggregation.testutils.LogAggregationTestcaseBuilder;
 import org.apache.hadoop.yarn.logaggregation.testutils.LogAggregationTestcaseBuilder.AppDescriptor;
+import org.apache.log4j.Level;
 
 import static org.apache.hadoop.yarn.conf.YarnConfiguration.LOG_AGGREGATION_FILE_CONTROLLER_FMT;
 import static org.apache.hadoop.yarn.logaggregation.LogAggregationTestUtils.enableFileControllers;
@@ -67,7 +67,7 @@ public class TestAggregatedLogDeletionService {
 
   @BeforeAll
   public static void beforeClass() {
-    HadoopLoggerUtils.setLogLevel("root", "DEBUG");
+    org.apache.log4j.Logger.getRootLogger().setLevel(Level.DEBUG);
   }
 
   @BeforeEach
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
index 0fd2841fcd0..346239f8e1b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
@@ -28,7 +28,6 @@ import org.slf4j.LoggerFactory;
 import org.slf4j.Marker;
 import org.slf4j.MarkerFactory;
 
-import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
index c04fba0a17c..6b0570a32e1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
@@ -22,7 +22,7 @@ import com.google.inject.Inject;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
-import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender;
+import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java
index 05031adc5cd..41285467489 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.GenericsUtil;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.security.AdminACLsManager;
-import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender;
+import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
index 8e24e8cd6b5..87d511b1725 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.yarn.server.nodemanager.webapp;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
-import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender;
+import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java
index 12b6dd7f691..c8496193933 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java
@@ -50,12 +50,11 @@ import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext;
 import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
 import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
 import org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext;
+import org.apache.log4j.Logger;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertEquals;
@@ -64,7 +63,8 @@ import static org.junit.Assert.assertFalse;
 
 public class TestContainersMonitorResourceChange {
 
-  static final Logger LOG = LoggerFactory.getLogger(TestContainersMonitorResourceChange.class);
+  static final Logger LOG = Logger
+      .getLogger(TestContainersMonitorResourceChange.class);
   private ContainersMonitorImpl containersMonitor;
   private MockExecutor executor;
   private Configuration conf;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
index 7ea8a6209e7..9d096d20c5f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
@@ -245,13 +245,6 @@
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-
     <dependency>
       <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
index 80cc9fc8fd9..dc69eba2bbb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.log4j.Logger;
 
 import java.util.Collections;
 import java.util.HashMap;
@@ -41,9 +42,6 @@ import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.function.LongBinaryOperator;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 /**
  * In-memory mapping between applications/container-tags and nodes/racks.
  * Required by constrained affinity/anti-affinity and cardinality placement.
@@ -52,7 +50,8 @@ import org.slf4j.LoggerFactory;
 @InterfaceStability.Unstable
 public class AllocationTagsManager {
 
-  private static final Logger LOG = LoggerFactory.getLogger(AllocationTagsManager.class);
+  private static final Logger LOG = Logger.getLogger(
+      AllocationTagsManager.class);
 
   private ReentrantReadWriteLock.ReadLock readLock;
   private ReentrantReadWriteLock.WriteLock writeLock;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
index 15e2d34b001..c17d4f6d7b0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
@@ -22,7 +22,7 @@ import com.google.inject.Inject;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
-import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender;
+import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.LI;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
index 12b017a921b..9a85315628f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.MockApps;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -232,8 +231,8 @@ public class TestFederationRMStateStoreService {
     conf.setInt(YarnConfiguration.FEDERATION_STATESTORE_HEARTBEAT_INITIAL_DELAY, 10);
     conf.set(YarnConfiguration.RM_CLUSTER_ID, subClusterId.getId());
 
-    LogCapturer logCapture =
-        LogCapturer.captureLogs(FederationStateStoreService.LOG);
+    GenericTestUtils.LogCapturer logCapture =
+        GenericTestUtils.LogCapturer.captureLogs(FederationStateStoreService.LOG);
 
     final MockRM rm = new MockRM(conf);
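The hunk above reverts to the LogCapturer nested inside GenericTestUtils; captureLogs, getOutput, and stopCapturing are the same three calls the fair scheduler tests later in this patch stop using. A sketch of the idiom with an illustrative logger and message:

    import static org.junit.Assert.assertTrue;

    import org.apache.hadoop.test.GenericTestUtils;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LogCapturerSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(LogCapturerSketch.class);

      @org.junit.Test
      public void demo() {
        GenericTestUtils.LogCapturer capture =
            GenericTestUtils.LogCapturer.captureLogs(LOG);
        try {
          LOG.info("expected fragment");
          assertTrue(capture.getOutput().contains("expected fragment"));
        } finally {
          // Always detach, or the capturer keeps buffering for later tests.
          capture.stopCapturing();
        }
      }
    }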
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
index dc2d18d5526..a1989d5c0c2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
@@ -28,13 +28,17 @@ import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileReader;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
@@ -79,7 +83,6 @@ import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.slf4j.LoggerFactory;
 
 public class TestSystemMetricsPublisherForV2 {
 
@@ -298,15 +301,42 @@ public class TestSystemMetricsPublisherForV2 {
   @Test(timeout = 10000)
   public void testPutEntityWhenNoCollector() throws Exception {
     // Validating the logs as DrainDispatcher won't throw exception
-    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+    class TestAppender extends AppenderSkeleton {
+      private final List<LoggingEvent> log = new ArrayList<>();
+
+      @Override
+      public boolean requiresLayout() {
+        return false;
+      }
+
+      @Override
+      protected void append(final LoggingEvent loggingEvent) {
+        log.add(loggingEvent);
+      }
+
+      @Override
+      public void close() {
+      }
+
+      public List<LoggingEvent> getLog() {
+        return new ArrayList<>(log);
+      }
+    }
+
+    TestAppender appender = new TestAppender();
+    final Logger logger = Logger.getRootLogger();
+    logger.addAppender(appender);
+
     try {
       RMApp app = createRMApp(ApplicationId.newInstance(0, 1));
       metricsPublisher.appCreated(app, app.getStartTime());
       dispatcher.await();
-      assertFalse("Dispatcher Crashed",
-          logCapturer.getOutput().contains("Error in dispatcher thread"));
+      for (LoggingEvent event : appender.getLog()) {
+        assertFalse("Dispatcher Crashed",
+            event.getRenderedMessage().contains("Error in dispatcher thread"));
+      }
     } finally {
-      logCapturer.stopCapturing();
+      logger.removeAppender(appender);
     }
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java
index 07630f54618..2e7b01ed50d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java
@@ -18,11 +18,12 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 
-import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework.ProportionalCapacityPreemptionPolicyMockFramework;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
 import org.junit.Test;
 
 import java.io.IOException;
 import java.util.Map;
@@ -156,7 +157,7 @@ public class TestProportionalCapacityPreemptionPolicyPreemptToBalance
 
   @Test
   public void testPreemptionToBalanceWithVcoreResource() throws IOException {
-    HadoopLoggerUtils.setLogLevel("root", "DEBUG");
+    Logger.getRootLogger().setLevel(Level.DEBUG);
     String labelsConfig = "=100:100,true"; // default partition
     String nodesConfig = "n1="; // only one node
     String queuesConfig =
@@ -194,7 +195,7 @@ public class TestProportionalCapacityPreemptionPolicyPreemptToBalance
 
   @Test
   public void testPreemptionToBalanceWithConfiguredTimeout() throws IOException {
-    HadoopLoggerUtils.setLogLevel("root", "DEBUG");
+    Logger.getRootLogger().setLevel(Level.DEBUG);
     String labelsConfig = "=100:100,true"; // default partition
     String nodesConfig = "n1="; // only one node
    String queuesConfig =
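Both preemption tests above raise the root logger to DEBUG and never lower it. A sketch of a scoped variant; the restore step is a suggestion, not something the tests themselves do:

    import org.apache.log4j.Level;
    import org.apache.log4j.Logger;

    public final class RootLevelSketch {
      public static void runAtDebug(Runnable scenario) {
        Logger root = Logger.getRootLogger();
        Level previous = root.getLevel();
        root.setLevel(Level.DEBUG);
        try {
          scenario.run();
        } finally {
          root.setLevel(previous); // keep DEBUG from leaking into later tests
        }
      }
    }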
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java
index c6066fd2085..024ec86f7d7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java
@@ -16,7 +16,6 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework;
 
-import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy;
 import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.TestProportionalCapacityPreemptionPolicyForNodePartitions;
 import org.slf4j.Logger;
@@ -111,7 +110,8 @@ public class ProportionalCapacityPreemptionPolicyMockFramework {
   public void setup() {
     resetResourceInformationMap();
 
-    HadoopLoggerUtils.setLogLevel("root", "DEBUG");
+    org.apache.log4j.Logger.getRootLogger().setLevel(
+        org.apache.log4j.Level.DEBUG);
 
     conf = new CapacitySchedulerConfiguration(new Configuration(false));
     conf.setLong(
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
index c5add68f8ec..6aaa15f3e18 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
@@ -25,10 +25,9 @@ import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
+import org.apache.log4j.Logger;
 import org.junit.Before;
 import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import static junit.framework.TestCase.fail;
 
@@ -38,7 +37,8 @@ import static junit.framework.TestCase.fail;
  * the invariant throws in case the invariants are not respected.
  */
 public class TestMetricsInvariantChecker {
-  public final static Logger LOG = LoggerFactory.getLogger(TestMetricsInvariantChecker.class);
+  public final static Logger LOG =
+      Logger.getLogger(TestMetricsInvariantChecker.class);
 
   private MetricsSystem metricsSystem;
   private MetricsInvariantChecker ic;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
index 68bbc94f97f..38fbcd84153 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
@@ -30,13 +29,19 @@ import org.apache.hadoop.yarn.util.resource.CustomResourceTypesConfigurationProvider;
 import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
-import org.slf4j.LoggerFactory;
 
 import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
 
 import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration.parseResourceConfigValue;
 import static org.junit.Assert.assertEquals;
@@ -49,6 +54,29 @@ public class TestFairSchedulerConfiguration {
 
   private static final String A_CUSTOM_RESOURCE = "a-custom-resource";
 
+  private static class TestAppender extends AppenderSkeleton {
+
+    private final List<LoggingEvent> logEvents = new CopyOnWriteArrayList<>();
+
+    @Override
+    public boolean requiresLayout() {
+      return false;
+    }
+
+    @Override
+    public void close() {
+    }
+
+    @Override
+    protected void append(LoggingEvent arg0) {
+      logEvents.add(arg0);
+    }
+
+    private List<LoggingEvent> getLogEvents() {
+      return logEvents;
+    }
+  }
+
   @Rule
   public ExpectedException exception = ExpectedException.none();
 
@@ -723,7 +751,9 @@ public class TestFairSchedulerConfiguration {
 
   @Test
   public void testMemoryIncrementConfiguredViaMultipleProperties() {
-    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+    TestAppender testAppender = new TestAppender();
+    Logger logger = LogManager.getRootLogger();
+    logger.addAppender(testAppender);
     try {
       Configuration conf = new Configuration();
       conf.set("yarn.scheduler.increment-allocation-mb", "7");
@@ -733,19 +763,23 @@ public class TestFairSchedulerConfiguration {
       FairSchedulerConfiguration fsc = new FairSchedulerConfiguration(conf);
       Resource increment = fsc.getIncrementAllocation();
       Assert.assertEquals(13L, increment.getMemorySize());
-      assertTrue("Warning message is not logged when specifying memory "
-          + "increment via multiple properties", logCapturer.getOutput().contains("Configuration "
-          + "yarn.resource-types.memory-mb.increment-allocation=13 is "
-          + "overriding the yarn.scheduler.increment-allocation-mb=7 "
-          + "property"));
+      assertTrue("Warning message is not logged when specifying memory "
+          + "increment via multiple properties",
+          testAppender.getLogEvents().stream().anyMatch(
+              e -> e.getLevel() == Level.WARN && ("Configuration "
+                  + "yarn.resource-types.memory-mb.increment-allocation=13 is "
+                  + "overriding the yarn.scheduler.increment-allocation-mb=7 "
+                  + "property").equals(e.getMessage())));
     } finally {
-      logCapturer.stopCapturing();
+      logger.removeAppender(testAppender);
     }
   }
 
   @Test
   public void testCpuIncrementConfiguredViaMultipleProperties() {
-    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+    TestAppender testAppender = new TestAppender();
+    Logger logger = LogManager.getRootLogger();
+    logger.addAppender(testAppender);
     try {
       Configuration conf = new Configuration();
       conf.set("yarn.scheduler.increment-allocation-vcores", "7");
@@ -755,13 +789,15 @@ public class TestFairSchedulerConfiguration {
       FairSchedulerConfiguration fsc = new FairSchedulerConfiguration(conf);
       Resource increment = fsc.getIncrementAllocation();
       Assert.assertEquals(13, increment.getVirtualCores());
-      assertTrue("Warning message is not logged when specifying CPU vCores "
-          + "increment via multiple properties", logCapturer.getOutput().contains("Configuration "
-          + "yarn.resource-types.vcores.increment-allocation=13 is "
-          + "overriding the yarn.scheduler.increment-allocation-vcores=7 "
-          + "property"));
+      assertTrue("Warning message is not logged when specifying CPU vCores "
+          + "increment via multiple properties",
+          testAppender.getLogEvents().stream().anyMatch(
+              e -> e.getLevel() == Level.WARN && ("Configuration "
+                  + "yarn.resource-types.vcores.increment-allocation=13 is "
+                  + "overriding the yarn.scheduler.increment-allocation-vcores=7 "
+                  + "property").equals(e.getMessage())));
     } finally {
-      logCapturer.stopCapturing();
+      logger.removeAppender(testAppender);
     }
   }
 }
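A closing note on the two assertions above: matching Level.WARN plus the exact message is stricter than the old substring search over captured root-logger output, since an unrelated line that merely contains the text can no longer satisfy the test. A sketch of that check extracted into a helper, assumed to sit inside TestFairSchedulerConfiguration so it can reach the private getLogEvents():

    private static boolean warnedWith(TestAppender appender, String expected) {
      return appender.getLogEvents().stream().anyMatch(
          e -> e.getLevel() == Level.WARN && expected.equals(e.getMessage()));
    }

The CopyOnWriteArrayList inside TestAppender matters here: append() can run on whichever thread emits the event while the assertion iterates the list, and a plain ArrayList could throw ConcurrentModificationException.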