Revert "HADOOP-18207. Introduce hadoop-logging module (#5503)"

This reverts commit 03a499821c.
Ayush Saxena 2023-06-05 09:34:40 +05:30
parent ee94f6cdcb
commit 1d0c9ab433
120 changed files with 1106 additions and 1165 deletions
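The revert swaps the log4j-hiding facade introduced by HADOOP-18207 (HadoopLoggerUtils and org.apache.hadoop.logging.LogCapturer) back out for direct log4j 1.x calls across the files below. A minimal sketch of the two styles this commit toggles between, with a hypothetical logger name and the facade calls shown only as comments:

import org.apache.log4j.Level;
import org.apache.log4j.LogManager;

public class LogLevelStyles {
  public static void main(String[] args) {
    String name = "org.example.SomeClass";   // hypothetical logger name

    // Facade style removed by this revert (hadoop-logging module):
    // HadoopLoggerUtils.setLogLevel(name, "DEBUG");
    // String level = HadoopLoggerUtils.getEffectiveLevel(name);

    // Direct log4j 1.x style restored by this revert:
    LogManager.getLogger(name).setLevel(Level.DEBUG);
    String level = LogManager.getLogger(name).getEffectiveLevel().toString();
    System.out.println(name + " -> " + level);
  }
}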

View File

@ -46,6 +46,16 @@
<artifactId>slf4j-api</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<scope>runtime</scope>
</dependency>
</dependencies>
<build>

View File

@ -82,14 +82,14 @@
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-logging</artifactId>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-logging</artifactId>
<scope>test</scope>
<type>test-jar</type>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>

View File

@ -15,7 +15,8 @@ package org.apache.hadoop.security.authentication.util;
import java.util.Random;
import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.junit.Assert;
import org.junit.Test;
@ -29,8 +30,9 @@ public class TestRandomSignerSecretProvider {
private final int timeout = 500;
private final long rolloverFrequency = timeout / 2;
static {
HadoopLoggerUtils.setLogLevel(RolloverSignerSecretProvider.LOG.getName(), "DEBUG");
{
LogManager.getLogger(
RolloverSignerSecretProvider.LOG.getName()).setLevel(Level.DEBUG);
}
@Test

View File

@ -19,7 +19,8 @@ import java.util.Random;
import javax.servlet.ServletContext;
import org.apache.curator.test.TestingServer;
import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@ -38,8 +39,9 @@ public class TestZKSignerSecretProvider {
private final int timeout = 100;
private final long rolloverFrequency = timeout / 2;
static {
HadoopLoggerUtils.setLogLevel(RolloverSignerSecretProvider.LOG.getName(), "DEBUG");
{
LogManager.getLogger(
RolloverSignerSecretProvider.LOG.getName()).setLevel(Level.DEBUG);
}
@Before

View File

@ -419,16 +419,6 @@
<artifactId>lz4-java</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-logging</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-logging</artifactId>
<scope>test</scope>
<type>test-jar</type>
</dependency>
</dependencies>
<build>

View File

@ -299,7 +299,7 @@ log4j.appender.NMAUDIT.MaxBackupIndex=${nm.audit.log.maxbackupindex}
yarn.ewma.cleanupInterval=300
yarn.ewma.messageAgeLimitSeconds=86400
yarn.ewma.maxUniqueMessages=250
log4j.appender.EWMA=org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender
log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}

View File

@ -42,7 +42,6 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import org.apache.hadoop.security.ssl.SSLFactory;
@ -51,6 +50,8 @@ import org.apache.hadoop.util.GenericsUtil;
import org.apache.hadoop.util.ServletUtil;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
/**
* Change log level in runtime.
@ -348,7 +349,7 @@ public class LogLevel {
}
if (GenericsUtil.isLog4jLogger(logName)) {
process(logName, level, out);
process(Logger.getLogger(logName), level, out);
} else {
out.println("Sorry, setting log level is only supported for log4j loggers.<br />");
}
@ -367,17 +368,19 @@ public class LogLevel {
+ "<input type='submit' value='Set Log Level' />"
+ "</form>";
private static void process(String log, String level, PrintWriter out) {
private static void process(Logger log, String level,
PrintWriter out) throws IOException {
if (level != null) {
try {
HadoopLoggerUtils.setLogLevel(log, level);
out.println(MARKER + "Setting Level to " + level + " ...<br />");
} catch (IllegalArgumentException e) {
if (!level.equalsIgnoreCase(Level.toLevel(level)
.toString())) {
out.println(MARKER + "Bad Level : <b>" + level + "</b><br />");
} else {
log.setLevel(Level.toLevel(level));
out.println(MARKER + "Setting Level to " + level + " ...<br />");
}
}
out.println(MARKER + "Effective Level: <b>" + HadoopLoggerUtils.getEffectiveLevel(log)
+ "</b><br />");
out.println(MARKER
+ "Effective Level: <b>" + log.getEffectiveLevel() + "</b><br />");
}
}
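In both versions the servlet is driven the same way, over the daemon's HTTP endpoint; a small sketch of exercising it from Java, assuming the standard /logLevel path and a placeholder daemon address:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class SetLogLevelOverHttp {
  public static void main(String[] args) throws Exception {
    // Placeholder address; use the HTTP port of the daemon whose logger should change.
    URL url = new URL("http://localhost:9870/logLevel?log=org.example.SomeClass&level=DEBUG");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try (BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream()))) {
      String line;
      while ((line = in.readLine()) != null) {
        // Expect markers like "Setting Level to DEBUG ..." and "Effective Level: <b>DEBUG</b>"
        System.out.println(line);
      }
    }
  }
}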

View File

@ -40,8 +40,8 @@ import org.apache.commons.lang3.time.FastDateFormat;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.log4j.LogManager;
import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses;
@ -761,7 +761,7 @@ public class StringUtils {
public void run() {
log.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{
"Shutting down " + classname + " at " + hostname}));
HadoopLoggerUtils.shutdownLogManager();
LogManager.shutdown();
}
}, SHUTDOWN_HOOK_PRIORITY);

View File

@ -68,7 +68,6 @@ import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration.IntegerRanges;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.alias.CredentialProvider;
import org.apache.hadoop.security.alias.CredentialProviderFactory;
@ -77,8 +76,10 @@ import org.apache.hadoop.test.GenericTestUtils;
import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Logger;
import org.apache.log4j.spi.LoggingEvent;
import org.mockito.Mockito;
import org.slf4j.LoggerFactory;
public class TestConfiguration {
@ -219,7 +220,9 @@ public class TestConfiguration {
InputStream in2 = new ByteArrayInputStream(bytes2);
// Attach our own log appender so we can verify output
LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
TestAppender appender = new TestAppender();
final Logger logger = Logger.getRootLogger();
logger.addAppender(appender);
try {
// Add the 2 different resources - this should generate a warning
@ -227,13 +230,17 @@ public class TestConfiguration {
conf.addResource(in2);
assertEquals("should see the first value", "A", conf.get("prop"));
String renderedMessage = logCapturer.getOutput();
assertTrue("did not see expected string inside message " + renderedMessage,
renderedMessage.contains(
"an attempt to override final parameter: " + "prop; Ignoring."));
List<LoggingEvent> events = appender.getLog();
assertEquals("overriding a final parameter should cause logging", 1,
events.size());
LoggingEvent loggingEvent = events.get(0);
String renderedMessage = loggingEvent.getRenderedMessage();
assertTrue("did not see expected string inside message "+ renderedMessage,
renderedMessage.contains("an attempt to override final parameter: "
+ "prop; Ignoring."));
} finally {
// Make sure the appender is removed
logCapturer.stopCapturing();
logger.removeAppender(appender);
}
}
@ -251,7 +258,9 @@ public class TestConfiguration {
InputStream in2 = new ByteArrayInputStream(bytes);
// Attach our own log appender so we can verify output
LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
TestAppender appender = new TestAppender();
final Logger logger = Logger.getRootLogger();
logger.addAppender(appender);
try {
// Add the resource twice from a stream - should not generate warnings
@ -259,15 +268,20 @@ public class TestConfiguration {
conf.addResource(in2);
assertEquals("A", conf.get("prop"));
String appenderOutput = logCapturer.getOutput();
List<LoggingEvent> events = appender.getLog();
for (LoggingEvent loggingEvent : events) {
System.out.println("Event = " + loggingEvent.getRenderedMessage());
}
assertTrue("adding same resource twice should not cause logging",
appenderOutput.isEmpty());
events.isEmpty());
} finally {
// Make sure the appender is removed
logCapturer.stopCapturing();
logger.removeAppender(appender);
}
}
@Test
public void testFinalWarningsMultiple() throws Exception {
// Make a configuration file with a repeated final property
@ -281,19 +295,24 @@ public class TestConfiguration {
InputStream in1 = new ByteArrayInputStream(bytes);
// Attach our own log appender so we can verify output
LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
TestAppender appender = new TestAppender();
final Logger logger = Logger.getRootLogger();
logger.addAppender(appender);
try {
// Add the resource - this should not produce a warning
conf.addResource(in1);
assertEquals("should see the value", "A", conf.get("prop"));
String appenderOutput = logCapturer.getOutput();
List<LoggingEvent> events = appender.getLog();
for (LoggingEvent loggingEvent : events) {
System.out.println("Event = " + loggingEvent.getRenderedMessage());
}
assertTrue("adding same resource twice should not cause logging",
appenderOutput.isEmpty());
events.isEmpty());
} finally {
// Make sure the appender is removed
logCapturer.stopCapturing();
logger.removeAppender(appender);
}
}
@ -310,20 +329,48 @@ public class TestConfiguration {
InputStream in1 = new ByteArrayInputStream(bytes);
// Attach our own log appender so we can verify output
LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
TestAppender appender = new TestAppender();
final Logger logger = Logger.getRootLogger();
logger.addAppender(appender);
try {
// Add the resource - this should produce a warning
conf.addResource(in1);
assertEquals("should see the value", "A", conf.get("prop"));
String renderedMessage = logCapturer.getOutput();
assertTrue("did not see expected string inside message " + renderedMessage,
renderedMessage.contains(
"an attempt to override final parameter: " + "prop; Ignoring."));
List<LoggingEvent> events = appender.getLog();
assertEquals("overriding a final parameter should cause logging", 1,
events.size());
LoggingEvent loggingEvent = events.get(0);
String renderedMessage = loggingEvent.getRenderedMessage();
assertTrue("did not see expected string inside message "+ renderedMessage,
renderedMessage.contains("an attempt to override final parameter: "
+ "prop; Ignoring."));
} finally {
// Make sure the appender is removed
logCapturer.stopCapturing();
logger.removeAppender(appender);
}
}
/**
* A simple appender for white box testing.
*/
private static class TestAppender extends AppenderSkeleton {
private final List<LoggingEvent> log = new ArrayList<>();
@Override public boolean requiresLayout() {
return false;
}
@Override protected void append(final LoggingEvent loggingEvent) {
log.add(loggingEvent);
}
@Override public void close() {
}
public List<LoggingEvent> getLog() {
return new ArrayList<>(log);
}
}

View File

@ -36,9 +36,8 @@ import org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater;
import org.apache.hadoop.io.compress.zlib.ZlibCompressor;
import org.apache.hadoop.io.compress.zlib.ZlibFactory;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.log4j.Logger;
import org.junit.Assert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
@ -48,6 +47,9 @@ import static org.junit.Assert.*;
public class CompressDecompressTester<T extends Compressor, E extends Decompressor> {
private static final Logger logger = Logger
.getLogger(CompressDecompressTester.class);
private final byte[] originalRawData;
private ImmutableList<TesterPair<T, E>> pairs = ImmutableList.of();
@ -486,12 +488,12 @@ public class CompressDecompressTester<T extends Compressor, E extends Decompress
return false;
}
abstract static class TesterCompressionStrategy {
protected final Logger logger = LoggerFactory.getLogger(getClass());
protected final Logger logger = Logger.getLogger(getClass());
abstract void assertCompression(String name, Compressor compressor, Decompressor decompressor,
byte[] originalRawData) throws Exception;
abstract void assertCompression(String name, Compressor compressor,
Decompressor decompressor, byte[] originalRawData) throws Exception;
}
}

View File

@ -29,7 +29,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.log.LogLevel.CLI;
import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.hadoop.minikdc.KerberosSecurityTestcase;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AuthenticationFilterInitializer;
@ -41,11 +40,12 @@ import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.junit.Assert;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.net.ssl.SSLException;
@ -67,7 +67,7 @@ public class TestLogLevel extends KerberosSecurityTestcase {
private final String logName = TestLogLevel.class.getName();
private String clientPrincipal;
private String serverPrincipal;
private final Logger log = LoggerFactory.getLogger(logName);
private final Logger log = Logger.getLogger(logName);
private final static String PRINCIPAL = "loglevel.principal";
private final static String KEYTAB = "loglevel.keytab";
private static final String PREFIX = "hadoop.http.authentication.";
@ -76,7 +76,7 @@ public class TestLogLevel extends KerberosSecurityTestcase {
public static void setUp() throws Exception {
org.slf4j.Logger logger =
LoggerFactory.getLogger(KerberosAuthenticator.class);
HadoopLoggerUtils.setLogLevel(logger.getName(), "DEBUG");
GenericTestUtils.setLogLevel(logger, Level.DEBUG);
FileUtil.fullyDelete(BASEDIR);
if (!BASEDIR.mkdirs()) {
throw new Exception("unable to create the base directory for testing");
@ -230,7 +230,7 @@ public class TestLogLevel extends KerberosSecurityTestcase {
final String connectProtocol, final boolean isSpnego)
throws Exception {
testDynamicLogLevel(bindProtocol, connectProtocol, isSpnego,
"DEBUG");
Level.DEBUG.toString());
}
/**
@ -250,8 +250,9 @@ public class TestLogLevel extends KerberosSecurityTestcase {
if (!LogLevel.isValidProtocol(connectProtocol)) {
throw new Exception("Invalid client protocol " + connectProtocol);
}
String oldLevel = HadoopLoggerUtils.getEffectiveLevel(log.getName());
Assert.assertNotEquals("Get default Log Level which shouldn't be ERROR.", "ERROR", oldLevel);
Level oldLevel = log.getEffectiveLevel();
Assert.assertNotEquals("Get default Log Level which shouldn't be ERROR.",
Level.ERROR, oldLevel);
// configs needed for SPNEGO at server side
if (isSpnego) {
@ -287,7 +288,7 @@ public class TestLogLevel extends KerberosSecurityTestcase {
});
server.stop();
// restore log level
HadoopLoggerUtils.setLogLevel(log.getName(), oldLevel.toString());
GenericTestUtils.setLogLevel(log, oldLevel);
}
/**
@ -321,7 +322,7 @@ public class TestLogLevel extends KerberosSecurityTestcase {
cli.run(setLevelArgs);
assertEquals("new level not equal to expected: ", newLevel.toUpperCase(),
HadoopLoggerUtils.getEffectiveLevel(log.getName()));
log.getEffectiveLevel().toString());
}
/**

View File

@ -42,9 +42,8 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import static org.apache.hadoop.metrics2.lib.Interns.info;
import static org.junit.Assert.assertEquals;
import org.apache.log4j.Logger;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanInfo;
@ -242,7 +241,7 @@ public class TestMetricsSourceAdapter {
private MetricsSourceAdapter sa = null;
private ScheduledFuture<?> future = null;
private AtomicBoolean hasError = null;
private static final Logger LOG = LoggerFactory.getLogger(SourceUpdater.class);
private static final Logger LOG = Logger.getLogger(SourceUpdater.class);
public SourceUpdater(MetricsSourceAdapter sourceAdapter,
AtomicBoolean err) {
@ -264,7 +263,7 @@ public class TestMetricsSourceAdapter {
} catch (Exception e) {
// catch all errors
hasError.set(true);
LOG.error("Something went wrong.", e);
LOG.error(e.getStackTrace());
} finally {
if (hasError.get()) {
LOG.error("Hit error, stopping now");
@ -285,7 +284,7 @@ public class TestMetricsSourceAdapter {
private int cnt = 0;
private ScheduledFuture<?> future = null;
private AtomicBoolean hasError = null;
private static final Logger LOG = LoggerFactory.getLogger(SourceReader.class);
private static final Logger LOG = Logger.getLogger(SourceReader.class);
public SourceReader(
TestMetricsSource source, MetricsSourceAdapter sourceAdapter,
@ -319,7 +318,7 @@ public class TestMetricsSourceAdapter {
} catch (Exception e) {
// catch other errors
hasError.set(true);
LOG.error("Something went wrong.", e);
LOG.error(e.getStackTrace());
} finally {
if (hasError.get()) {
future.cancel(false);

View File

@ -22,7 +22,7 @@ import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ExitCodeException;
@ -41,8 +41,8 @@ public class TestShellBasedUnixGroupsMapping {
private static final Logger TESTLOG =
LoggerFactory.getLogger(TestShellBasedUnixGroupsMapping.class);
private final LogCapturer shellMappingLog =
LogCapturer.captureLogs(
private final GenericTestUtils.LogCapturer shellMappingLog =
GenericTestUtils.LogCapturer.captureLogs(
ShellBasedUnixGroupsMapping.LOG);
private class TestGroupUserNotExist

View File

@ -19,8 +19,6 @@ package org.apache.hadoop.security.ssl;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.junit.BeforeClass;
import org.junit.Test;
@ -44,7 +42,7 @@ public class TestReloadingX509KeyManager {
private static final String BASEDIR = GenericTestUtils.getTempPath(
TestReloadingX509TrustManager.class.getSimpleName());
private final LogCapturer reloaderLog = LogCapturer.captureLogs(
private final GenericTestUtils.LogCapturer reloaderLog = GenericTestUtils.LogCapturer.captureLogs(
FileMonitoringTimerTask.LOG);
@BeforeClass

View File

@ -19,7 +19,7 @@ package org.apache.hadoop.security.ssl;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import java.util.function.Supplier;

View File

@ -18,7 +18,7 @@
package org.apache.hadoop.service;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
@ -29,7 +29,7 @@ import org.slf4j.LoggerFactory;
import java.io.PrintWriter;
import static org.apache.hadoop.logging.LogCapturer.captureLogs;
import static org.apache.hadoop.test.GenericTestUtils.LogCapturer.captureLogs;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.times;

View File

@ -25,6 +25,7 @@ import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.PrintStream;
import java.io.StringWriter;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;
@ -37,6 +38,7 @@ import java.util.Locale;
import java.util.Objects;
import java.util.Random;
import java.util.Set;
import java.util.Enumeration;
import java.util.TreeSet;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
@ -51,11 +53,17 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.hadoop.util.BlockingThreadPoolExecutorService;
import org.apache.hadoop.util.DurationInfo;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Appender;
import org.apache.log4j.Layout;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.WriterAppender;
import org.junit.Assert;
import org.junit.Assume;
import org.mockito.invocation.InvocationOnMock;
@ -107,17 +115,51 @@ public abstract class GenericTestUtils {
public static final String ERROR_INVALID_ARGUMENT =
"Total wait time should be greater than check interval time";
@Deprecated
public static Logger toLog4j(org.slf4j.Logger logger) {
return LogManager.getLogger(logger.getName());
}
/**
* @deprecated use {@link #disableLog(org.slf4j.Logger)} instead
*/
@Deprecated
public static void disableLog(Logger logger) {
logger.setLevel(Level.OFF);
}
public static void disableLog(org.slf4j.Logger logger) {
HadoopLoggerUtils.setLogLevel(logger.getName(), "OFF");
disableLog(toLog4j(logger));
}
public static void setLogLevel(Logger logger, Level level) {
logger.setLevel(level);
}
/**
* @deprecated
* use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
*/
@Deprecated
public static void setLogLevel(org.slf4j.Logger logger, Level level) {
setLogLevel(toLog4j(logger), level);
}
public static void setLogLevel(org.slf4j.Logger logger,
org.slf4j.event.Level level) {
HadoopLoggerUtils.setLogLevel(logger.getName(), level.toString());
setLogLevel(toLog4j(logger), Level.toLevel(level.toString()));
}
public static void setRootLogLevel(org.slf4j.event.Level level) {
HadoopLoggerUtils.setLogLevel("root", level.toString());
setLogLevel(LogManager.getRootLogger(), Level.toLevel(level.toString()));
}
public static void setCurrentLoggersLogLevel(org.slf4j.event.Level level) {
for (Enumeration<?> loggers = LogManager.getCurrentLoggers();
loggers.hasMoreElements();) {
Logger logger = (Logger) loggers.nextElement();
logger.setLevel(Level.toLevel(level.toString()));
}
}
public static org.slf4j.event.Level toLevel(String level) {
@ -429,6 +471,47 @@ public abstract class GenericTestUtils {
}
}
public static class LogCapturer {
private StringWriter sw = new StringWriter();
private WriterAppender appender;
private Logger logger;
public static LogCapturer captureLogs(org.slf4j.Logger logger) {
if (logger.getName().equals("root")) {
return new LogCapturer(org.apache.log4j.Logger.getRootLogger());
}
return new LogCapturer(toLog4j(logger));
}
public static LogCapturer captureLogs(Logger logger) {
return new LogCapturer(logger);
}
private LogCapturer(Logger logger) {
this.logger = logger;
Appender defaultAppender = Logger.getRootLogger().getAppender("stdout");
if (defaultAppender == null) {
defaultAppender = Logger.getRootLogger().getAppender("console");
}
final Layout layout = (defaultAppender == null) ? new PatternLayout() :
defaultAppender.getLayout();
this.appender = new WriterAppender(layout, sw);
logger.addAppender(this.appender);
}
public String getOutput() {
return sw.toString();
}
public void stopCapturing() {
logger.removeAppender(appender);
}
public void clearOutput() {
sw.getBuffer().setLength(0);
}
}
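A typical test-side use of the restored LogCapturer, sketched under the assumption that the slf4j-log4j12 binding is on the classpath as in Hadoop's test runs:

import org.apache.hadoop.test.GenericTestUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogCapturerUsage {
  private static final Logger LOG = LoggerFactory.getLogger(LogCapturerUsage.class);

  public static void main(String[] args) {
    GenericTestUtils.LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(LOG);
    try {
      LOG.warn("an attempt to override final parameter: prop; Ignoring.");
      // Assertions in the tests above work against the captured text, not appender internals.
      System.out.println(logs.getOutput().contains("override final parameter"));
      logs.clearOutput();
    } finally {
      logs.stopCapturing();   // detach the WriterAppender from the log4j logger again
    }
  }
}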
/**
* Mockito answer helper that triggers one latch as soon as the
* method is called, then waits on another before continuing.

View File

@ -26,8 +26,6 @@ import org.slf4j.LoggerFactory;
import java.util.function.Supplier;
import org.slf4j.event.Level;
import org.apache.hadoop.logging.LogCapturer;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

View File

@ -22,8 +22,8 @@ import java.io.File;
import org.junit.Assert;
import org.apache.log4j.Logger;
import org.junit.Test;
import org.slf4j.Logger;
public class TestClassUtil {
@Test(timeout=10000)
@ -35,6 +35,6 @@ public class TestClassUtil {
Assert.assertTrue("Containing jar does not exist on file system ",
jarFile.exists());
Assert.assertTrue("Incorrect jar file " + containingJar,
jarFile.getName().matches("slf4j-api.*[.]jar"));
jarFile.getName().matches("log4j.*[.]jar"));
}
}

View File

@ -28,7 +28,7 @@ import java.util.List;
import static org.junit.Assert.*;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.assertj.core.api.Assertions;
import org.junit.Before;
import org.junit.Test;

View File

@ -28,12 +28,10 @@ import java.util.Iterator;
import java.util.Random;
import org.junit.Assert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.util.hash.Hash;
import org.apache.log4j.Logger;
import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;
@ -115,7 +113,7 @@ public class BloomFilterCommonTester<T extends Filter> {
}
interface FilterTesterStrategy {
Logger logger = LoggerFactory.getLogger(FilterTesterStrategy.class);
final Logger logger = Logger.getLogger(FilterTesterStrategy.class);
void assertWhat(Filter filter, int numInsertions, int hashId,
ImmutableSet<Integer> falsePositives);

View File

@ -53,12 +53,6 @@
<artifactId>hadoop-auth</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-logging</artifactId>
<scope>test</scope>
<type>test-jar</type>
</dependency>
<dependency>
<groupId>org.apache.hadoop.thirdparty</groupId>
<artifactId>hadoop-shaded-guava</artifactId>

View File

@ -49,7 +49,6 @@ import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.util.Time;
import org.apache.http.client.utils.URIBuilder;
import org.junit.After;
@ -584,8 +583,8 @@ public class TestKMS {
@Test
public void testStartStopHttpPseudo() throws Exception {
// Make sure bogus errors don't get emitted.
LogCapturer logs =
LogCapturer.captureLogs(LoggerFactory.getLogger(
GenericTestUtils.LogCapturer logs =
GenericTestUtils.LogCapturer.captureLogs(LoggerFactory.getLogger(
"com.sun.jersey.server.wadl.generators.AbstractWadlGeneratorGrammarGenerator"));
try {
testStartStop(false, false);

View File

@ -18,24 +18,23 @@
package org.apache.hadoop.crypto.key.kms.server;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FilterOutputStream;
import java.io.InputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.PrintStream;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Paths;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.reflect.FieldUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.kms.server.KMS.KMSOp;
import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.ThreadUtil;
import org.apache.log4j.LogManager;
import org.apache.log4j.PropertyConfigurator;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@ -68,23 +67,24 @@ public class TestKMSAudit {
public final Timeout testTimeout = new Timeout(180000L, TimeUnit.MILLISECONDS);
@Before
public void setUp() throws IOException, URISyntaxException {
public void setUp() throws IOException {
originalOut = System.err;
memOut = new ByteArrayOutputStream();
filterOut = new FilterOut(memOut);
capturedOut = new PrintStream(filterOut);
System.setErr(capturedOut);
URL url = getClass().getClassLoader().getResource("log4j-kmsaudit.properties");
File file = Paths.get(url.toURI()).toFile();
HadoopLoggerUtils.updateLog4jConfiguration(KMSAudit.class, file.getAbsolutePath());
InputStream is =
ThreadUtil.getResourceAsStream("log4j-kmsaudit.properties");
PropertyConfigurator.configure(is);
IOUtils.closeStream(is);
Configuration conf = new Configuration();
this.kmsAudit = new KMSAudit(conf);
}
@After
public void cleanUp() throws Exception {
public void cleanUp() {
System.setErr(originalOut);
HadoopLoggerUtils.resetConfiguration();
LogManager.resetConfiguration();
kmsAudit.shutdown();
}

View File

@ -1,23 +0,0 @@
<FindBugsFilter>
<!--
conversionPattern is only set once and used to initiate PatternLayout object
only once. It is set by log4j framework if set as part of log4j properties and accessed
only during first append operation.
-->
<Match>
<Class name="org.apache.hadoop.logging.appenders.AsyncRFAAppender"/>
<Field name="conversionPattern"/>
<Bug pattern="IS2_INCONSISTENT_SYNC"/>
</Match>
<!-- Following fields are used in ErrorsAndWarningsBlock, which is not a part of analysis of findbugs -->
<Match>
<Class name="org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender$Element"/>
<Or>
<Field name="count"/>
<Field name="timestampSeconds"/>
</Or>
<Bug pattern="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD"/>
</Match>
</FindBugsFilter>

View File

@ -1,125 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<artifactId>hadoop-project</artifactId>
<groupId>org.apache.hadoop</groupId>
<version>3.4.0-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>hadoop-logging</artifactId>
<version>3.4.0-SNAPSHOT</version>
<packaging>jar</packaging>
<name>Apache Hadoop Logging</name>
<description>Logging Support for Apache Hadoop project</description>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
</properties>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-annotations</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<scope>provided</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<executions>
<execution>
<phase>prepare-package</phase>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
<configuration>
<attach>true</attach>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<executions>
<execution>
<id>prepare-jar</id>
<phase>prepare-package</phase>
<goals>
<goal>jar</goal>
</goals>
</execution>
<execution>
<id>prepare-test-jar</id>
<phase>prepare-package</phase>
<goals>
<goal>test-jar</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.rat</groupId>
<artifactId>apache-rat-plugin</artifactId>
<configuration>
<excludes>
<exclude>dev-support/findbugsExcludeFile.xml</exclude>
</excludes>
</configuration>
</plugin>
<plugin>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-maven-plugin</artifactId>
<configuration>
<excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
</configuration>
</plugin>
</plugins>
</build>
</project>

View File

@ -1,145 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.logging;
import java.io.FileInputStream;
import java.io.Flushable;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.Enumeration;
import java.util.Map;
import java.util.Properties;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.log4j.Appender;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.log4j.PropertyConfigurator;
/**
* Hadoop's internal class that access log4j APIs directly.
* <p/>
* This class will depend on log4j directly, so callers should not use this class directly to avoid
* introducing log4j dependencies to downstream users. Please call the methods in
* {@link HadoopLoggerUtils}, as they will call the methods here through reflection.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
final class HadoopInternalLog4jUtils {
private HadoopInternalLog4jUtils() {
}
static void setLogLevel(String loggerName, String levelName) {
if (loggerName == null) {
throw new IllegalArgumentException("logger name cannot be null");
}
Logger logger = loggerName.equalsIgnoreCase("root") ?
LogManager.getRootLogger() :
LogManager.getLogger(loggerName);
Level level = Level.toLevel(levelName.toUpperCase());
if (!level.toString().equalsIgnoreCase(levelName)) {
throw new IllegalArgumentException("Unsupported log level " + levelName);
}
logger.setLevel(level);
}
static void shutdownLogManager() {
LogManager.shutdown();
}
static String getEffectiveLevel(String loggerName) {
Logger logger = loggerName.equalsIgnoreCase("root") ?
LogManager.getRootLogger() :
LogManager.getLogger(loggerName);
return logger.getEffectiveLevel().toString();
}
static void resetConfiguration() {
LogManager.resetConfiguration();
}
static void updateLog4jConfiguration(Class<?> targetClass, String log4jPath) throws Exception {
Properties customProperties = new Properties();
try (FileInputStream fs = new FileInputStream(log4jPath);
InputStream is = targetClass.getResourceAsStream("/log4j.properties")) {
customProperties.load(fs);
Properties originalProperties = new Properties();
originalProperties.load(is);
for (Map.Entry<Object, Object> entry : customProperties.entrySet()) {
originalProperties.setProperty(entry.getKey().toString(), entry.getValue().toString());
}
LogManager.resetConfiguration();
PropertyConfigurator.configure(originalProperties);
}
}
static boolean hasAppenders(String logger) {
return Logger.getLogger(logger)
.getAllAppenders()
.hasMoreElements();
}
@SuppressWarnings("unchecked")
static void syncLogs() {
// flush standard streams
//
System.out.flush();
System.err.flush();
// flush flushable appenders
//
final Logger rootLogger = Logger.getRootLogger();
flushAppenders(rootLogger);
final Enumeration<Logger> allLoggers = rootLogger.getLoggerRepository().
getCurrentLoggers();
while (allLoggers.hasMoreElements()) {
final Logger l = allLoggers.nextElement();
flushAppenders(l);
}
}
@SuppressWarnings("unchecked")
private static void flushAppenders(Logger l) {
final Enumeration<Appender> allAppenders = l.getAllAppenders();
while (allAppenders.hasMoreElements()) {
final Appender a = allAppenders.nextElement();
if (a instanceof Flushable) {
try {
((Flushable) a).flush();
} catch (IOException ioe) {
System.err.println(a + ": Failed to flush!"
+ stringifyException(ioe));
}
}
}
}
private static String stringifyException(Throwable e) {
StringWriter stringWriter = new StringWriter();
PrintWriter printWriter = new PrintWriter(stringWriter);
e.printStackTrace(printWriter);
printWriter.close();
return stringWriter.toString();
}
}

View File

@ -1,142 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.logging;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A bridge class for operating on logging framework, such as changing log4j log level, etc.
* Will call the methods in {@link HadoopInternalLog4jUtils} to perform operations on log4j level.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public final class HadoopLoggerUtils {
private static final String INTERNAL_UTILS_CLASS =
"org.apache.hadoop.logging.HadoopInternalLog4jUtils";
private HadoopLoggerUtils() {
}
private static Method getMethod(String methodName, Class<?>... args) {
try {
Class<?> clazz = Class.forName(INTERNAL_UTILS_CLASS);
return clazz.getDeclaredMethod(methodName, args);
} catch (ClassNotFoundException | NoSuchMethodException e) {
throw new AssertionError("should not happen", e);
}
}
private static void throwUnchecked(Throwable throwable) {
if (throwable instanceof RuntimeException) {
throw (RuntimeException) throwable;
}
if (throwable instanceof Error) {
throw (Error) throwable;
}
}
public static void shutdownLogManager() {
Method method = getMethod("shutdownLogManager");
try {
method.invoke(null);
} catch (IllegalAccessException e) {
throw new AssertionError("should not happen", e);
} catch (InvocationTargetException e) {
throwUnchecked(e.getCause());
throw new AssertionError("Failed to execute, should not happen", e.getCause());
}
}
public static void setLogLevel(String loggerName, String levelName) {
Method method = getMethod("setLogLevel", String.class, String.class);
try {
method.invoke(null, loggerName, levelName);
} catch (IllegalAccessException e) {
throw new AssertionError("should not happen", e);
} catch (InvocationTargetException e) {
throwUnchecked(e.getCause());
throw new AssertionError("Failed to execute, should not happen", e.getCause());
}
}
public static String getEffectiveLevel(String loggerName) {
Method method = getMethod("getEffectiveLevel", String.class);
try {
return (String) method.invoke(null, loggerName);
} catch (IllegalAccessException e) {
throw new AssertionError("should not happen", e);
} catch (InvocationTargetException e) {
throwUnchecked(e.getCause());
throw new AssertionError("Failed to execute, should not happen", e.getCause());
}
}
public static void resetConfiguration() {
Method method = getMethod("resetConfiguration");
try {
method.invoke(null);
} catch (IllegalAccessException e) {
throw new AssertionError("should not happen", e);
} catch (InvocationTargetException e) {
throwUnchecked(e.getCause());
throw new AssertionError("Failed to execute, should not happen", e.getCause());
}
}
public static void updateLog4jConfiguration(Class<?> targetClass, String log4jPath) {
Method method = getMethod("updateLog4jConfiguration", Class.class, String.class);
try {
method.invoke(null, targetClass, log4jPath);
} catch (IllegalAccessException e) {
throw new AssertionError("should not happen", e);
} catch (InvocationTargetException e) {
throwUnchecked(e.getCause());
throw new AssertionError("Failed to execute, should not happen", e.getCause());
}
}
public static boolean hasAppenders(String logger) {
Method method = getMethod("hasAppenders", String.class);
try {
return (Boolean) method.invoke(null, logger);
} catch (IllegalAccessException e) {
throw new AssertionError("should not happen", e);
} catch (InvocationTargetException e) {
throwUnchecked(e.getCause());
throw new AssertionError("Failed to execute, should not happen", e.getCause());
}
}
public synchronized static void syncLogs() {
Method method = getMethod("syncLogs");
try {
method.invoke(null);
} catch (IllegalAccessException e) {
throw new AssertionError("should not happen", e);
} catch (InvocationTargetException e) {
throwUnchecked(e.getCause());
throw new AssertionError("Failed to execute, should not happen", e.getCause());
}
}
}

View File

@ -1,65 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.logging;
import java.io.StringWriter;
import org.apache.log4j.Appender;
import org.apache.log4j.Layout;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.WriterAppender;
public class LogCapturer {
private final StringWriter sw = new StringWriter();
private final Appender appender;
private final Logger logger;
public static LogCapturer captureLogs(org.slf4j.Logger logger) {
if (logger.getName().equals("root")) {
return new LogCapturer(Logger.getRootLogger());
}
return new LogCapturer(LogManager.getLogger(logger.getName()));
}
private LogCapturer(Logger logger) {
this.logger = logger;
Appender defaultAppender = Logger.getRootLogger().getAppender("stdout");
if (defaultAppender == null) {
defaultAppender = Logger.getRootLogger().getAppender("console");
}
final Layout layout =
(defaultAppender == null) ? new PatternLayout() : defaultAppender.getLayout();
this.appender = new WriterAppender(layout, sw);
logger.addAppender(this.appender);
}
public String getOutput() {
return sw.toString();
}
public void stopCapturing() {
logger.removeAppender(appender);
}
public void clearOutput() {
sw.getBuffer().setLength(0);
}
}

View File

@ -1,37 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.logging.test;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.logging.HadoopLoggerUtils;
public class TestSyncLogs {
private static final Logger LOG = LoggerFactory.getLogger(TestSyncLogs.class);
@Test
public void testSyncLogs() {
LOG.info("Testing log sync");
HadoopLoggerUtils.syncLogs();
}
}

View File

@ -1,18 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# log4j configuration used during build and unit tests
log4j.rootLogger=debug,stdout
log4j.threshold=ALL
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n

View File

@ -38,6 +38,11 @@
<groupId>org.apache.kerby</groupId>
<artifactId>kerb-simplekdc</artifactId>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>

View File

@ -38,7 +38,6 @@
<module>hadoop-minikdc</module>
<module>hadoop-kms</module>
<module>hadoop-registry</module>
<module>hadoop-logging</module>
</modules>
<build>

View File

@ -86,12 +86,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>netty-all</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-logging</artifactId>
<scope>test</scope>
<type>test-jar</type>
</dependency>
<dependency>
<groupId>org.mock-server</groupId>
<artifactId>mockserver-netty</artifactId>

View File

@ -31,7 +31,6 @@ import static org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory.SSL_MONIT
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.util.Lists;
import org.junit.Assert;
import org.junit.Test;
@ -62,8 +61,8 @@ public final class TestURLConnectionFactory {
public void testSSLInitFailure() throws Exception {
Configuration conf = new Configuration();
conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "foo");
LogCapturer logs =
LogCapturer.captureLogs(
GenericTestUtils.LogCapturer logs =
GenericTestUtils.LogCapturer.captureLogs(
LoggerFactory.getLogger(URLConnectionFactory.class));
URLConnectionFactory.newDefaultURLConnectionFactory(conf);
Assert.assertTrue("Expected log for ssl init failure not found!",

View File

@ -182,12 +182,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>junit-jupiter-params</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-logging</artifactId>
<scope>test</scope>
<type>test-jar</type>
</dependency>
</dependencies>
<build>

View File

@ -40,7 +40,6 @@ import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
import org.apache.hadoop.hdfs.server.federation.router.RemoteMethod;
import org.apache.hadoop.hdfs.server.federation.router.RouterRpcClient;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FAIR_HANDLER_COUNT_KEY_PREFIX;
import static org.junit.Assert.assertEquals;
@ -49,8 +48,8 @@ public class TestRouterRefreshFairnessPolicyController {
private static final Logger LOG =
LoggerFactory.getLogger(TestRouterRefreshFairnessPolicyController.class);
private final LogCapturer controllerLog =
LogCapturer.captureLogs(AbstractRouterRpcFairnessPolicyController.LOG);
private final GenericTestUtils.LogCapturer controllerLog =
GenericTestUtils.LogCapturer.captureLogs(AbstractRouterRpcFairnessPolicyController.LOG);
private StateStoreDFSCluster cluster;

View File

@ -22,7 +22,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.federation.router.FederationUtil;
import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.junit.Test;
import org.slf4j.LoggerFactory;
@ -179,7 +179,7 @@ public class TestRouterRpcFairnessPolicyController {
private void verifyInstantiationError(Configuration conf, int handlerCount,
int totalDedicatedHandlers) {
LogCapturer logs = LogCapturer
GenericTestUtils.LogCapturer logs = GenericTestUtils.LogCapturer
.captureLogs(LoggerFactory.getLogger(
StaticRouterRpcFairnessPolicyController.class));
try {

View File

@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.LogVerificationAppender;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
@ -54,7 +55,6 @@ import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
@ -322,7 +322,11 @@ public class TestRouterNamenodeMonitoring {
int httpsRequests, int requestsPerService) {
// Attach our own log appender so we can verify output
LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
final LogVerificationAppender appender =
new LogVerificationAppender();
final org.apache.log4j.Logger logger =
org.apache.log4j.Logger.getRootLogger();
logger.addAppender(appender);
GenericTestUtils.setRootLogLevel(Level.DEBUG);
// Setup and start the Router
@ -343,11 +347,8 @@ public class TestRouterNamenodeMonitoring {
heartbeatService.getNamenodeStatusReport();
}
}
assertEquals(2, org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(),
"JMX URL: https://"));
assertEquals(2, org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(),
"JMX URL: http://"));
logCapturer.stopCapturing();
assertEquals(httpsRequests * 2, appender.countLinesWithMessage("JMX URL: https://"));
assertEquals(httpRequests * 2, appender.countLinesWithMessage("JMX URL: http://"));
}
/**

View File

@ -135,8 +135,6 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.service.Service.STATE;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.LambdaTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.junit.AfterClass;
@ -2069,8 +2067,8 @@ public class TestRouterRpc {
@Test
public void testMkdirsWithCallerContext() throws IOException {
LogCapturer auditlog =
LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
GenericTestUtils.LogCapturer auditlog =
GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
// Current callerContext is null
assertNull(CallerContext.getCurrent());
@ -2096,8 +2094,8 @@ public class TestRouterRpc {
@Test
public void testRealUserPropagationInCallerContext()
throws IOException, InterruptedException {
LogCapturer auditlog =
LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
GenericTestUtils.LogCapturer auditlog =
GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
// Current callerContext is null
assertNull(CallerContext.getCurrent());
@ -2141,8 +2139,8 @@ public class TestRouterRpc {
@Test
public void testAddClientIpPortToCallerContext() throws IOException {
LogCapturer auditLog =
LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
GenericTestUtils.LogCapturer auditLog =
GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
// 1. ClientIp and ClientPort are not set on the client.
// Set client context.
@ -2176,8 +2174,8 @@ public class TestRouterRpc {
@Test
public void testAddClientIdAndCallIdToCallerContext() throws IOException {
LogCapturer auditLog =
LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
GenericTestUtils.LogCapturer auditLog =
GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
// 1. ClientId and ClientCallId are not set on the client.
// Set client context.

View File

@ -72,8 +72,6 @@ import org.apache.hadoop.ipc.CallerContext;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.junit.Test;
import org.slf4j.event.Level;
@ -278,10 +276,12 @@ public class TestRouterRpcMultiDestination extends TestRouterRpc {
@Test
public void testPreviousBlockNotNull()
throws IOException, URISyntaxException {
final LogCapturer stateChangeLog = LogCapturer.captureLogs(NameNode.stateChangeLog);
final GenericTestUtils.LogCapturer stateChangeLog =
GenericTestUtils.LogCapturer.captureLogs(NameNode.stateChangeLog);
GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.DEBUG);
final LogCapturer nameNodeLog = LogCapturer.captureLogs(NameNode.LOG);
final GenericTestUtils.LogCapturer nameNodeLog =
GenericTestUtils.LogCapturer.captureLogs(NameNode.LOG);
GenericTestUtils.setLogLevel(NameNode.LOG, Level.DEBUG);
final FederationRPCMetrics metrics = getRouterContext().
@ -454,8 +454,8 @@ public class TestRouterRpcMultiDestination extends TestRouterRpc {
@Test
public void testCallerContextWithMultiDestinations() throws IOException {
LogCapturer auditLog =
LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
GenericTestUtils.LogCapturer auditLog =
GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
// set client context
CallerContext.setCurrent(

View File

@ -310,4 +310,14 @@
<Method name="reconcile" />
<Bug pattern="SWL_SLEEP_WITH_LOCK_HELD" />
</Match>
<!--
conversionPattern is only set once and used to initiate PatternLayout object
only once. It is set by log4j framework if set as part of log4j properties and accessed
only during first append operation.
-->
<Match>
<Class name="org.apache.hadoop.hdfs.util.AsyncRFAAppender"/>
<Field name="conversionPattern"/>
<Bug pattern="IS2_INCONSISTENT_SYNC"/>
</Match>
</FindBugsFilter>

View File

@ -164,12 +164,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>hadoop-minikdc</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-logging</artifactId>
<scope>test</scope>
<type>test-jar</type>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>

View File

@ -31,8 +31,6 @@ import javax.management.ObjectName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.hadoop.metrics2.util.MBeans;
/**
@ -113,8 +111,11 @@ public class MetricsLoggerTask implements Runnable {
.substring(0, maxLogLineLength) + "...");
}
// TODO : hadoop-logging module to hide log4j implementation details, this method
// can directly call utility from hadoop-logging.
private static boolean hasAppenders(Logger logger) {
return HadoopLoggerUtils.hasAppenders(logger.getName());
return org.apache.log4j.Logger.getLogger(logger.getName()).getAllAppenders()
.hasMoreElements();
}
/**

View File

@ -32,11 +32,11 @@ import org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics;
import org.apache.hadoop.hdfs.server.namenode.visitor.INodeCountVisitor;
import org.apache.hadoop.hdfs.server.namenode.visitor.INodeCountVisitor.Counts;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.hadoop.util.GSet;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Level;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -110,13 +110,13 @@ public class FsImageValidation {
}
static void initLogLevels() {
Util.setLogLevel(FSImage.class, "TRACE");
Util.setLogLevel(FileJournalManager.class, "TRACE");
Util.setLogLevel(FSImage.class, Level.TRACE);
Util.setLogLevel(FileJournalManager.class, Level.TRACE);
Util.setLogLevel(GSet.class, "OFF");
Util.setLogLevel(BlockManager.class, "OFF");
Util.setLogLevel(DatanodeManager.class, "OFF");
Util.setLogLevel(TopMetrics.class, "OFF");
Util.setLogLevel(GSet.class, Level.OFF);
Util.setLogLevel(BlockManager.class, Level.OFF);
Util.setLogLevel(DatanodeManager.class, Level.OFF);
Util.setLogLevel(TopMetrics.class, Level.OFF);
}
static class Util {
@ -127,10 +127,11 @@ public class FsImageValidation {
+ ", max=" + StringUtils.byteDesc(runtime.maxMemory());
}
static void setLogLevel(Class<?> clazz, String level) {
HadoopLoggerUtils.setLogLevel(clazz.getName(), level);
static void setLogLevel(Class<?> clazz, Level level) {
final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(clazz);
logger.setLevel(level);
LOG.info("setLogLevel {} to {}, getEffectiveLevel() = {}", clazz.getName(), level,
HadoopLoggerUtils.getEffectiveLevel(clazz.getName()));
logger.getEffectiveLevel());
}
static String toCommaSeparatedNumber(long n) {

View File

@ -16,7 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.logging.appenders;
package org.apache.hadoop.hdfs.util;
import java.io.IOException;

View File

@ -0,0 +1,75 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.util.ArrayList;
import java.util.List;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.spi.LoggingEvent;
import org.apache.log4j.spi.ThrowableInformation;
/**
* Used to verify that certain exceptions or messages are present in log output.
*/
public class LogVerificationAppender extends AppenderSkeleton {
private final List<LoggingEvent> log = new ArrayList<LoggingEvent>();
@Override
public boolean requiresLayout() {
return false;
}
@Override
protected void append(final LoggingEvent loggingEvent) {
log.add(loggingEvent);
}
@Override
public void close() {
}
public List<LoggingEvent> getLog() {
return new ArrayList<LoggingEvent>(log);
}
public int countExceptionsWithMessage(final String text) {
int count = 0;
for (LoggingEvent e: getLog()) {
ThrowableInformation t = e.getThrowableInformation();
if (t != null) {
String m = t.getThrowable().getMessage();
if (m.contains(text)) {
count++;
}
}
}
return count;
}
public int countLinesWithMessage(final String text) {
int count = 0;
for (LoggingEvent e: getLog()) {
String msg = e.getRenderedMessage();
if (msg != null && msg.contains(text)) {
count++;
}
}
return count;
}
}
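A brief usage sketch, mirroring how the tests in this revert attach the appender (hedged: attaching to the root logger is simply the pattern these tests happen to use):

// Illustrative usage: attach to the root logger, run the scenario under test,
// then count the captured exceptions that carry the expected message.
final LogVerificationAppender appender = new LogVerificationAppender();
org.apache.log4j.Logger.getRootLogger().addAppender(appender);
// ... exercise the code expected to log the failure ...
int md5failures = appender.countExceptionsWithMessage(" is corrupt with MD5 checksum of ");
assertEquals("Upgrade did not fail with bad MD5", 1, md5failures);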

View File

@ -33,8 +33,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
public class TestDFSRename {
@ -190,8 +189,8 @@ public class TestDFSRename {
final DistributedFileSystem dfs = cluster.getFileSystem();
Path path = new Path("/test");
dfs.mkdirs(path);
LogCapturer auditLog =
LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
GenericTestUtils.LogCapturer auditLog =
GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
dfs.rename(path, new Path("/dir1"),
new Rename[] {Rename.OVERWRITE, Rename.TO_TRASH});
String auditOut = auditLog.getOutput();

View File

@ -45,9 +45,9 @@ import org.apache.hadoop.hdfs.server.namenode.FSImage;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.IllegalReservedPathException;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.Logger;
import org.junit.Test;
import static org.junit.Assert.*;
@ -317,7 +317,9 @@ public class TestDFSUpgradeFromImage {
"imageMD5Digest", "22222222222222222222222222222222");
// Attach our own log appender so we can verify output
LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
final LogVerificationAppender appender = new LogVerificationAppender();
final Logger logger = Logger.getRootLogger();
logger.addAppender(appender);
// Upgrade should now fail
try {
@ -329,10 +331,9 @@ public class TestDFSUpgradeFromImage {
if (!msg.contains("Failed to load FSImage file")) {
throw ioe;
}
int md5failures = org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(),
int md5failures = appender.countExceptionsWithMessage(
" is corrupt with MD5 checksum of ");
assertEquals("Upgrade did not fail with bad MD5", 1, md5failures);
logCapturer.stopCapturing();
}
}

View File

@ -26,7 +26,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@ -48,7 +48,7 @@ public class TestDataStream {
@Test(timeout = 60000)
public void testDfsClient() throws IOException, InterruptedException {
LogCapturer logs = LogCapturer.captureLogs(LoggerFactory
LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(LoggerFactory
.getLogger(DataStreamer.class));
byte[] toWrite = new byte[PACKET_SIZE];
new Random(1).nextBytes(toWrite);

View File

@ -51,7 +51,7 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.junit.After;
import org.junit.Before;
@ -168,9 +168,9 @@ public class TestEncryptedTransfer {
FileChecksum checksum = writeUnencryptedAndThenRestartEncryptedCluster();
LogCapturer logs = LogCapturer.captureLogs(
LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
LoggerFactory.getLogger(SaslDataTransferServer.class));
LogCapturer logs1 = LogCapturer.captureLogs(
LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
LoggerFactory.getLogger(DataTransferSaslUtil.class));
try {
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
@ -239,7 +239,7 @@ public class TestEncryptedTransfer {
Mockito.doReturn(false).when(spyClient).shouldEncryptData();
DFSClientAdapter.setDFSClient((DistributedFileSystem) fs, spyClient);
LogCapturer logs = LogCapturer.captureLogs(
LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
LoggerFactory.getLogger(DataNode.class));
try {
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
@ -457,9 +457,9 @@ public class TestEncryptedTransfer {
fs = getFileSystem(conf);
LogCapturer logs = LogCapturer.captureLogs(
LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
LoggerFactory.getLogger(SaslDataTransferServer.class));
LogCapturer logs1 = LogCapturer.captureLogs(
LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
LoggerFactory.getLogger(DataTransferSaslUtil.class));
try {
writeTestDataToFile(fs);

View File

@ -54,7 +54,7 @@ import org.apache.hadoop.http.HttpConfig.Policy;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.junit.After;
import org.junit.Assert;
import org.junit.Rule;
@ -138,7 +138,7 @@ public class TestSaslDataTransfer extends SaslDataTransferTestCase {
HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "");
LogCapturer logs = LogCapturer.captureLogs(
LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
LoggerFactory.getLogger(DataNode.class));
try {
doTest(clientConf);

View File

@ -30,7 +30,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.hadoop.test.PathUtils;
import org.junit.After;
import org.junit.Before;

View File

@ -56,7 +56,7 @@ import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.server.namenode.ha.ObserverReadProxyProvider;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.junit.Test;
import org.slf4j.LoggerFactory;

View File

@ -28,7 +28,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.Whitebox;
import org.assertj.core.api.Assertions;
@ -236,8 +235,8 @@ public class TestBlockManagerSafeMode {
public void testCheckSafeMode9() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_RECHECK_INTERVAL_KEY, 3000);
LogCapturer logs =
LogCapturer.captureLogs(BlockManagerSafeMode.LOG);
GenericTestUtils.LogCapturer logs =
GenericTestUtils.LogCapturer.captureLogs(BlockManagerSafeMode.LOG);
BlockManagerSafeMode blockManagerSafeMode = new BlockManagerSafeMode(bm,
fsn, true, conf);
String content = logs.getOutput();
@ -248,8 +247,8 @@ public class TestBlockManagerSafeMode {
public void testCheckSafeMode10(){
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_RECHECK_INTERVAL_KEY, -1);
LogCapturer logs =
LogCapturer.captureLogs(BlockManagerSafeMode.LOG);
GenericTestUtils.LogCapturer logs =
GenericTestUtils.LogCapturer.captureLogs(BlockManagerSafeMode.LOG);
BlockManagerSafeMode blockManagerSafeMode = new BlockManagerSafeMode(bm,
fsn, true, conf);
String content = logs.getOutput();

View File

@ -58,7 +58,7 @@ import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStat
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.junit.Test;
import org.mockito.Mockito;
import org.slf4j.LoggerFactory;
@ -575,7 +575,7 @@ public class TestPendingReconstruction {
new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
cluster.waitActive();
DFSTestUtil.setNameNodeLogLevel(Level.DEBUG);
LogCapturer logs = LogCapturer
LogCapturer logs = GenericTestUtils.LogCapturer
.captureLogs(LoggerFactory.getLogger("BlockStateChange"));
BlockManager bm = cluster.getNamesystem().getBlockManager();
try {

View File

@ -21,6 +21,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSI
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.any;
@ -40,7 +41,6 @@ import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.AddBlockFlag;
import org.apache.hadoop.fs.ContentSummary;
@ -49,6 +49,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.LogVerificationAppender;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.Block;
@ -66,15 +67,16 @@ import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.Namesystem;
import org.apache.hadoop.hdfs.server.namenode.TestINodeFile;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.spi.LoggingEvent;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.slf4j.LoggerFactory;
@RunWith(Parameterized.class)
public class TestReplicationPolicy extends BaseReplicationPolicyTest {
@ -505,26 +507,26 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
(HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
final LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
final LogVerificationAppender appender = new LogVerificationAppender();
final Logger logger = Logger.getRootLogger();
logger.addAppender(appender);
// try to choose NUM_OF_DATANODES which is more than actually available
// nodes.
DatanodeStorageInfo[] targets = chooseTarget(dataNodes.length);
assertEquals(targets.length, dataNodes.length - 2);
boolean isFound = false;
for (String logLine : logCapturer.getOutput().split("\n")) {
// Supposed to place replicas on each node, but two data nodes are not
// available for placing replicas, so here we expect a shortfall of 2
if(logLine.contains("WARN") && logLine.contains("in need of 2")) {
isFound = true;
break;
}
}
assertTrue("Could not find the block placement log specific to 2 datanodes not being "
+ "available for placing replicas", isFound);
logCapturer.stopCapturing();
final List<LoggingEvent> log = appender.getLog();
assertNotNull(log);
assertFalse(log.size() == 0);
final LoggingEvent lastLogEntry = log.get(log.size() - 1);
assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
// Supposed to place replicas on each node, but two data nodes are not
// available for placing replicas, so here we expect a shortfall of 2
assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2"));
resetHeartbeatForStorages();
}
@ -1708,14 +1710,17 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
@Test
public void testChosenFailureForStorageType() {
final LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
final LogVerificationAppender appender = new LogVerificationAppender();
final Logger logger = Logger.getRootLogger();
logger.addAppender(appender);
DatanodeStorageInfo[] targets = replicator.chooseTarget(filename, 1,
dataNodes[0], new ArrayList<DatanodeStorageInfo>(), false, null,
BLOCK_SIZE, TestBlockStoragePolicy.POLICY_SUITE.getPolicy(
HdfsConstants.StoragePolicy.COLD.value()), null);
assertEquals(0, targets.length);
assertNotEquals(0,
StringUtils.countMatches(logCapturer.getOutput(), "NO_REQUIRED_STORAGE_TYPE"));
appender.countLinesWithMessage("NO_REQUIRED_STORAGE_TYPE"));
}
@Test

View File

@ -27,6 +27,7 @@ import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.concurrent.TimeoutException;
@ -38,15 +39,19 @@ import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.hdfs.server.namenode.PatternMatchingAppender;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Appender;
import org.apache.log4j.AsyncAppender;
import org.junit.After;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import java.util.function.Supplier;
/**
* Test periodic logging of DataNode metrics.
*/
@ -123,13 +128,13 @@ public class TestDataNodeMetricsLogger {
}
@Test
@SuppressWarnings("unchecked")
public void testMetricsLoggerIsAsync() throws IOException {
startDNForTest(true);
assertNotNull(dn);
assertTrue(Collections.list(
org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME).getAllAppenders())
.get(0) instanceof org.apache.log4j.AsyncAppender);
org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME);
@SuppressWarnings("unchecked")
List<Appender> appenders = Collections.list(logger.getAllAppenders());
assertTrue(appenders.get(0) instanceof AsyncAppender);
}
/**
@ -144,15 +149,27 @@ public class TestDataNodeMetricsLogger {
metricsProvider);
startDNForTest(true);
assertNotNull(dn);
LogCapturer logCapturer =
LogCapturer.captureLogs(LoggerFactory.getLogger(DataNode.METRICS_LOG_NAME));
final PatternMatchingAppender appender =
(PatternMatchingAppender) org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME)
.getAppender("PATTERNMATCHERAPPENDER");
// Ensure that the supplied pattern was matched.
GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains("FakeMetric"),
1000, 60000);
logCapturer.stopCapturing();
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
return appender.isMatched();
}
}, 1000, 60000);
dn.shutdown();
}
private void addAppender(org.apache.log4j.Logger logger, Appender appender) {
@SuppressWarnings("unchecked")
List<Appender> appenders = Collections.list(logger.getAllAppenders());
((AsyncAppender) appenders.get(0)).addAppender(appender);
}
public interface TestFakeMetricMXBean {
int getFakeMetric();
}

View File

@ -27,6 +27,7 @@ import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
@ -76,9 +77,10 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.LazyPersistTestCase
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.util.Time;
import org.apache.log4j.SimpleLayout;
import org.apache.log4j.WriterAppender;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
@ -412,9 +414,14 @@ public class TestDirectoryScanner {
@Test(timeout=600000)
public void testScanDirectoryStructureWarn() throws Exception {
LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
// add a logger stream to check what has been printed to the log
ByteArrayOutputStream loggerStream = new ByteArrayOutputStream();
org.apache.log4j.Logger rootLogger =
org.apache.log4j.Logger.getRootLogger();
GenericTestUtils.setRootLogLevel(Level.INFO);
WriterAppender writerAppender =
new WriterAppender(new SimpleLayout(), loggerStream);
rootLogger.addAppender(writerAppender);
Configuration conf = getConfiguration();
cluster = new MiniDFSCluster
@ -445,7 +452,7 @@ public class TestDirectoryScanner {
scan(1, 1, 0, 1, 0, 0, 0);
// ensure the warn log does not appear and the missing block log does appear
String logContent = logCapturer.getOutput();
String logContent = new String(loggerStream.toByteArray());
String missingBlockWarn = "Deleted a metadata file" +
" for the deleted block";
String dirStructureWarnLog = " found in invalid directory." +
@ -457,7 +464,6 @@ public class TestDirectoryScanner {
LOG.info("check pass");
} finally {
logCapturer.stopCapturing();
if (scanner != null) {
scanner.shutdown();
scanner = null;
@ -520,7 +526,7 @@ public class TestDirectoryScanner {
client = cluster.getFileSystem().getClient();
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY, 1);
// log trace
LogCapturer logCapturer = LogCapturer.
GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer.
captureLogs(NameNode.stateChangeLog);
// Add files with 5 blocks
createFile(GenericTestUtils.getMethodName(), BLOCK_LENGTH * 5, false);

View File

@ -18,8 +18,6 @@
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import net.jcip.annotations.NotThreadSafe;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
@ -53,6 +51,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.LogVerificationAppender;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
@ -80,10 +79,10 @@ import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.MetricsAsserts;
import org.apache.log4j.Logger;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
@ -394,7 +393,9 @@ public class TestFsDatasetCache {
}
// nth file should hit a capacity exception
LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
final LogVerificationAppender appender = new LogVerificationAppender();
final Logger logger = Logger.getRootLogger();
logger.addAppender(appender);
setHeartbeatResponse(cacheBlocks(fileLocs[numFiles-1]));
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@ -402,12 +403,11 @@ public class TestFsDatasetCache {
public Boolean get() {
// check the log reported by FsDataSetCache
// in the case that cache capacity is exceeded.
int lines = StringUtils.countMatches(logCapturer.getOutput(),
int lines = appender.countLinesWithMessage(
"could not reserve more bytes in the cache: ");
return lines > 0;
}
}, 500, 30000);
logCapturer.stopCapturing();
// Also check the metrics for the failure
assertTrue("Expected more than 0 failed cache attempts",
fsd.getNumBlocksFailedToCache() > 0);

View File

@ -16,7 +16,6 @@
*/
package org.apache.hadoop.hdfs.server.diskbalancer;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.util.Preconditions;
import java.util.function.Supplier;
import org.apache.commons.codec.digest.DigestUtils;
@ -322,7 +321,7 @@ public class TestDiskBalancer {
0);
DFSTestUtil.waitReplication(fs, filePath, (short) 1);
LogCapturer logCapturer = LogCapturer
GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
.captureLogs(DiskBalancer.LOG);
try {

View File

@ -0,0 +1,58 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.util.regex.Pattern;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.spi.LoggingEvent;
/**
* An appender that matches logged messages against the given
* regular expression.
*/
public class PatternMatchingAppender extends AppenderSkeleton {
private final Pattern pattern;
private volatile boolean matched;
public PatternMatchingAppender() {
this.pattern = Pattern.compile("^.*FakeMetric.*$");
this.matched = false;
}
public boolean isMatched() {
return matched;
}
@Override
protected void append(LoggingEvent event) {
if (pattern.matcher(event.getMessage().toString()).matches()) {
matched = true;
}
}
@Override
public void close() {
}
@Override
public boolean requiresLayout() {
return false;
}
}
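A short usage sketch, assuming the appender is registered in log4j.properties under the name PATTERNMATCHERAPPENDER on the metrics logger, as the test properties later in this revert do:

// Illustrative usage from a test: look up the appender configured on the
// NameNodeMetricsLog logger and wait until a "FakeMetric" line has matched.
PatternMatchingAppender appender =
    (PatternMatchingAppender) org.apache.log4j.Logger.getLogger("NameNodeMetricsLog")
        .getAppender("PATTERNMATCHERAPPENDER");
GenericTestUtils.waitFor(appender::isMatched, 1000, 60000);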

View File

@ -37,7 +37,7 @@ import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.authorize.ProxyServers;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.hadoop.util.Lists;
import org.junit.Before;

View File

@ -41,7 +41,7 @@ import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;

View File

@ -24,6 +24,7 @@ import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
@ -38,9 +39,12 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.log4j.Appender;
import org.apache.log4j.AsyncAppender;
import org.apache.log4j.Logger;
import org.junit.After;
import org.junit.AfterClass;
@ -103,7 +107,6 @@ public class TestAuditLogs {
UserGroupInformation userGroupInfo;
@Before
@SuppressWarnings("unchecked")
public void setupCluster() throws Exception {
// must configure prior to instantiating the namesystem because it
// will reconfigure the logger if async is enabled
@ -119,9 +122,11 @@ public class TestAuditLogs {
util.createFiles(fs, fileName);
// make sure the appender is what it's supposed to be
assertTrue(Collections.list(org.apache.log4j.Logger.getLogger(
"org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit").getAllAppenders())
.get(0) instanceof org.apache.log4j.AsyncAppender);
Logger logger = org.apache.log4j.Logger.getLogger(
"org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit");
@SuppressWarnings("unchecked")
List<Appender> appenders = Collections.list(logger.getAllAppenders());
assertTrue(appenders.get(0) instanceof AsyncAppender);
fnames = util.getFileNames(fileName);
util.waitReplication(fs, fileName, (short)3);

View File

@ -82,7 +82,7 @@ import org.apache.hadoop.io.Text;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.ExitUtil.ExitException;
@ -863,7 +863,7 @@ public class TestCheckpoint {
savedSd = sd;
}
LogCapturer logs = LogCapturer.captureLogs(
LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
LoggerFactory.getLogger(Storage.class));
try {
// try to lock the storage that's already locked

View File

@ -49,7 +49,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;

View File

@ -83,7 +83,6 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException;
import org.apache.hadoop.hdfs.util.XMLUtils.Stanza;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.ExitUtil;
@ -91,6 +90,9 @@ import org.apache.hadoop.util.ExitUtil.ExitException;
import org.apache.hadoop.util.Lists;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.LogManager;
import org.apache.log4j.spi.LoggingEvent;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@ -1715,13 +1717,36 @@ public class TestEditLog {
}
}
class TestAppender extends AppenderSkeleton {
private final List<LoggingEvent> log = new ArrayList<>();
@Override
public boolean requiresLayout() {
return false;
}
@Override
protected void append(final LoggingEvent loggingEvent) {
log.add(loggingEvent);
}
@Override
public void close() {
}
public List<LoggingEvent> getLog() {
return new ArrayList<>(log);
}
}
/**
*
* @throws Exception
*/
@Test
public void testReadActivelyUpdatedLog() throws Exception {
final LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
final TestAppender appender = new TestAppender();
LogManager.getRootLogger().addAppender(appender);
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
// Set single handler thread, so all transactions hit same thread-local ops.
@ -1769,16 +1794,21 @@ public class TestEditLog {
rwf.close();
events.poll();
for (String logLine : logCapturer.getOutput().split("\n")) {
if (logLine != null && logLine.contains("Caught exception after reading")) {
String pattern = "Caught exception after reading (.*) ops";
Pattern r = Pattern.compile(pattern);
final List<LoggingEvent> log = appender.getLog();
for (LoggingEvent event : log) {
Matcher m = r.matcher(event.getRenderedMessage());
if (m.find()) {
fail("Should not try to read past latest syned edit log op");
}
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
logCapturer.stopCapturing();
LogManager.getRootLogger().removeAppender(appender);
}
}

View File

@ -26,8 +26,6 @@ import java.io.IOException;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.junit.Assert;
import org.junit.Test;
@ -120,8 +118,8 @@ public class TestEditsDoubleBuffer {
op3.setTransactionId(3);
buffer.writeOp(op3, fakeLogVersion);
LogCapturer logs =
LogCapturer.captureLogs(EditsDoubleBuffer.LOG);
GenericTestUtils.LogCapturer logs =
GenericTestUtils.LogCapturer.captureLogs(EditsDoubleBuffer.LOG);
try {
buffer.close();
fail();

View File

@ -64,7 +64,7 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.erasurecode.ECSchema;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.FakeTimer;
import org.slf4j.event.Level;

View File

@ -25,7 +25,7 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.hadoop.test.MetricsAsserts;
import org.apache.hadoop.util.FakeTimer;
import org.apache.hadoop.util.Time;

View File

@ -29,8 +29,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@ -60,7 +58,7 @@ public class TestFSNamesystemLockReport {
private MiniDFSCluster cluster;
private FileSystem fs;
private UserGroupInformation userGroupInfo;
private LogCapturer logs;
private GenericTestUtils.LogCapturer logs;
@Before
public void setUp() throws Exception {
@ -78,7 +76,7 @@ public class TestFSNamesystemLockReport {
userGroupInfo = UserGroupInformation.createUserForTesting("bob",
new String[] {"hadoop"});
logs = LogCapturer.captureLogs(FSNamesystem.LOG);
logs = GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.LOG);
GenericTestUtils
.setLogLevel(LoggerFactory.getLogger(FSNamesystem.class.getName()),
org.slf4j.event.Level.INFO);

View File

@ -115,7 +115,7 @@ import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.hadoop.util.ToolRunner;
import org.junit.After;
import org.junit.AfterClass;

View File

@ -18,13 +18,15 @@
package org.apache.hadoop.hdfs.server.namenode;
import java.util.function.Supplier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Appender;
import org.apache.log4j.AsyncAppender;
import org.junit.Rule;
import org.junit.Test;
@ -32,6 +34,7 @@ import org.junit.rules.Timeout;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
@ -61,12 +64,12 @@ public class TestNameNodeMetricsLogger {
}
@Test
@SuppressWarnings("unchecked")
public void testMetricsLoggerIsAsync() throws IOException {
makeNameNode(true);
org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(NameNode.METRICS_LOG_NAME);
assertTrue(Collections.list(logger.getAllAppenders()).get(0)
instanceof org.apache.log4j.AsyncAppender);
@SuppressWarnings("unchecked")
List<Appender> appenders = Collections.list(logger.getAllAppenders());
assertTrue(appenders.get(0) instanceof AsyncAppender);
}
/**
@ -77,14 +80,20 @@ public class TestNameNodeMetricsLogger {
public void testMetricsLogOutput()
throws IOException, InterruptedException, TimeoutException {
TestFakeMetric metricsProvider = new TestFakeMetric();
MBeans.register(this.getClass().getSimpleName(), "DummyMetrics", metricsProvider);
MBeans.register(this.getClass().getSimpleName(),
"DummyMetrics", metricsProvider);
makeNameNode(true); // Log metrics early and often.
LogCapturer logCapturer =
LogCapturer.captureLogs(LoggerFactory.getLogger(NameNode.METRICS_LOG_NAME));
final PatternMatchingAppender appender =
(PatternMatchingAppender) org.apache.log4j.Logger.getLogger(NameNode.METRICS_LOG_NAME)
.getAppender("PATTERNMATCHERAPPENDER");
GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains("FakeMetric"),
1000, 60000);
logCapturer.stopCapturing();
// Ensure that the supplied pattern was matched.
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
return appender.isMatched();
}
}, 1000, 60000);
}
/**

View File

@ -28,8 +28,7 @@ import java.util.Collection;
import org.junit.Test;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
public class TestNameNodeResourcePolicy {

View File

@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.LogVerificationAppender;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@ -68,12 +69,12 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.util.HostsFileWriter;
import org.apache.hadoop.hdfs.util.MD5FileUtils;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.ExitUtil.ExitException;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.Logger;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@ -523,8 +524,10 @@ public class TestStartup {
// Corrupt the md5 files in all the namedirs
corruptFSImageMD5(true);
// Attach our own log appender so we can verify output
LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
// Attach our own log appender so we can verify output
final LogVerificationAppender appender = new LogVerificationAppender();
final Logger logger = Logger.getRootLogger();
logger.addAppender(appender);
// Try to start a new cluster
LOG.info("\n===========================================\n" +
@ -538,13 +541,10 @@ public class TestStartup {
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"Failed to load FSImage file", ioe);
int md5failures =
org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(),
" is corrupt with MD5 checksum of ");
int md5failures = appender.countExceptionsWithMessage(
" is corrupt with MD5 checksum of ");
// Two namedirs, so should have seen two failures
assertEquals(2, md5failures);
logCapturer.stopCapturing();
}
} finally {
if (cluster != null) {

View File

@ -43,7 +43,7 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@ -197,7 +197,7 @@ public class TestBootstrapStandby {
// Trying to bootstrap standby should now fail since the edit
// logs aren't available in the shared dir.
LogCapturer logs = LogCapturer.captureLogs(
LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
LoggerFactory.getLogger(BootstrapStandby.class));
try {
assertEquals(BootstrapStandby.ERR_CODE_LOGS_UNAVAILABLE, forceBootstrap(1));

View File

@ -44,7 +44,6 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.Whitebox;
import org.junit.After;
import org.junit.Before;
@ -144,7 +143,7 @@ public class TestDelegationTokensWithHA {
() -> (DistributedFileSystem) FileSystem.get(conf));
GenericTestUtils.setLogLevel(ObserverReadProxyProvider.LOG, Level.DEBUG);
LogCapturer logCapture = LogCapturer
GenericTestUtils.LogCapturer logCapture = GenericTestUtils.LogCapturer
.captureLogs(ObserverReadProxyProvider.LOG);
try {
dfs.access(new Path("/"), FsAction.READ);

View File

@ -37,6 +37,7 @@ import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.LogVerificationAppender;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.common.Util;
@ -47,12 +48,12 @@ import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.Lists;
import org.apache.hadoop.util.ThreadUtil;
import org.apache.log4j.spi.LoggingEvent;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@ -298,38 +299,39 @@ public class TestStandbyCheckpoints {
@Test(timeout = 30000)
public void testCheckpointBeforeNameNodeInitializationIsComplete()
throws Exception {
LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
final LogVerificationAppender appender = new LogVerificationAppender();
final org.apache.log4j.Logger logger = org.apache.log4j.Logger
.getRootLogger();
logger.addAppender(appender);
try {
// Transition 2 to observer
cluster.transitionToObserver(2);
doEdits(0, 10);
// After a rollEditLog, Standby(nn1)'s next checkpoint would be
// ahead of observer(nn2).
nns[0].getRpcServer().rollEditLog();
// Transition 2 to observer
cluster.transitionToObserver(2);
doEdits(0, 10);
// After a rollEditLog, Standby(nn1)'s next checkpoint would be
// ahead of observer(nn2).
nns[0].getRpcServer().rollEditLog();
NameNode nn2 = nns[2];
FSImage nnFSImage = NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, null);
NameNode nn2 = nns[2];
FSImage nnFSImage = NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, null);
// After the standby creates a checkpoint, it will try to push the image to
// the active and all observers, updating its own txid to the most recent.
HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12));
HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12));
// After the standby creates a checkpoint, it will try to push the image to
// the active and all observers, updating its own txid to the most recent.
HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12));
HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12));
NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, nnFSImage);
cluster.transitionToStandby(2);
NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, nnFSImage);
cluster.transitionToStandby(2);
logger.removeAppender(appender);
for (String logLine : logCapturer.getOutput().split("\n")) {
if (logLine != null && logLine.contains("PutImage failed") && logLine.contains(
"FSImage has not been set in the NameNode.")) {
//Logs have the expected exception.
return;
}
for (LoggingEvent event : appender.getLog()) {
String message = event.getRenderedMessage();
if (message.contains("PutImage failed") &&
message.contains("FSImage has not been set in the NameNode.")) {
//Logs have the expected exception.
return;
}
fail("Expected exception not present in logs.");
} finally {
logCapturer.stopCapturing();
}
fail("Expected exception not present in logs.");
}
/**

View File

@ -93,7 +93,7 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.util.KerberosName;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.hadoop.test.LambdaTestUtils;
import org.apache.hadoop.util.ExitUtil;
import org.junit.After;
@ -1372,7 +1372,7 @@ public class TestExternalStoragePolicySatisfier {
Path filePath = new Path("/zeroSizeFile");
DFSTestUtil.createFile(fs, filePath, 1024, (short) 5, 0);
fs.setReplication(filePath, (short) 3);
LogCapturer logs = LogCapturer.captureLogs(
LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
LoggerFactory.getLogger(BlockStorageMovementAttemptedItems.class));
fs.setStoragePolicy(filePath, "COLD");
fs.satisfyStoragePolicy(filePath);

View File

@ -22,6 +22,9 @@ log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n
# Only to be used for testing
log4j.appender.PATTERNMATCHERAPPENDER=org.apache.hadoop.hdfs.server.namenode.PatternMatchingAppender
#
# NameNode metrics logging.
# The default is to retain two namenode-metrics.log files up to 64MB each.
@ -29,10 +32,10 @@ log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%
# TODO : While migrating to log4j2, replace AsyncRFAAppender with AsyncAppender as
# log4j2 properties support wrapping of other appenders to AsyncAppender using appender ref
namenode.metrics.logger=INFO,ASYNCNNMETRICSRFA
namenode.metrics.logger=INFO,ASYNCNNMETRICSRFA,PATTERNMATCHERAPPENDER
log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
log4j.additivity.NameNodeMetricsLog=false
log4j.appender.ASYNCNNMETRICSRFA=org.apache.hadoop.logging.appenders.AsyncRFAAppender
log4j.appender.ASYNCNNMETRICSRFA=org.apache.hadoop.hdfs.util.AsyncRFAAppender
log4j.appender.ASYNCNNMETRICSRFA.conversionPattern=%d{ISO8601} %m%n
log4j.appender.ASYNCNNMETRICSRFA.maxFileSize=64MB
log4j.appender.ASYNCNNMETRICSRFA.fileName=${hadoop.log.dir}/namenode-metrics.log
@ -45,10 +48,10 @@ log4j.appender.ASYNCNNMETRICSRFA.maxBackupIndex=1
# TODO : While migrating to log4j2, replace AsyncRFAAppender with AsyncAppender as
# log4j2 properties support wrapping of other appenders to AsyncAppender using appender ref
datanode.metrics.logger=INFO,ASYNCDNMETRICSRFA
datanode.metrics.logger=INFO,ASYNCDNMETRICSRFA,PATTERNMATCHERAPPENDER
log4j.logger.DataNodeMetricsLog=${datanode.metrics.logger}
log4j.additivity.DataNodeMetricsLog=false
log4j.appender.ASYNCDNMETRICSRFA=org.apache.hadoop.logging.appenders.AsyncRFAAppender
log4j.appender.ASYNCDNMETRICSRFA=org.apache.hadoop.hdfs.util.AsyncRFAAppender
log4j.appender.ASYNCDNMETRICSRFA.conversionPattern=%d{ISO8601} %m%n
log4j.appender.ASYNCDNMETRICSRFA.maxFileSize=64MB
log4j.appender.ASYNCDNMETRICSRFA.fileName=${hadoop.log.dir}/datanode-metrics.log
@ -69,7 +72,7 @@ hdfs.audit.log.maxfilesize=256MB
hdfs.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
log4j.appender.ASYNCAUDITAPPENDER=org.apache.hadoop.logging.appenders.AsyncRFAAppender
log4j.appender.ASYNCAUDITAPPENDER=org.apache.hadoop.hdfs.util.AsyncRFAAppender
log4j.appender.ASYNCAUDITAPPENDER.blocking=false
log4j.appender.ASYNCAUDITAPPENDER.bufferSize=256
log4j.appender.ASYNCAUDITAPPENDER.conversionPattern=%m%n

View File

@ -124,12 +124,6 @@
<artifactId>assertj-core</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-logging</artifactId>
<scope>test</scope>
<type>test-jar</type>
</dependency>
</dependencies>
<build>

View File

@ -36,10 +36,9 @@ import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CopyOnWriteArrayList;
import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptFailEvent;
import org.apache.hadoop.yarn.util.resource.CustomResourceTypesConfigurationProvider;
import org.junit.After;
@ -108,10 +107,12 @@ import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.ControlledClock;
import org.apache.hadoop.yarn.util.SystemClock;
import org.apache.hadoop.yarn.util.resource.ResourceUtils;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.spi.LoggingEvent;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
@ -127,6 +128,29 @@ public class TestTaskAttempt{
}
}
private static class TestAppender extends AppenderSkeleton {
private final List<LoggingEvent> logEvents = new CopyOnWriteArrayList<>();
@Override
public boolean requiresLayout() {
return false;
}
@Override
public void close() {
}
@Override
protected void append(LoggingEvent arg0) {
logEvents.add(arg0);
}
private List<LoggingEvent> getLogEvents() {
return logEvents;
}
}
@BeforeClass
public static void setupBeforeClass() {
ResourceUtils.resetResourceTypes(new Configuration());
@ -1700,10 +1724,11 @@ public class TestTaskAttempt{
for (String memoryName : ImmutableList.of(
MRJobConfig.RESOURCE_TYPE_NAME_MEMORY,
MRJobConfig.RESOURCE_TYPE_ALTERNATIVE_NAME_MEMORY)) {
final Logger logger = LoggerFactory.getLogger(TaskAttemptImpl.class);
LogCapturer logCapturer = LogCapturer.captureLogs(logger);
TestAppender testAppender = new TestAppender();
final Logger logger = Logger.getLogger(TaskAttemptImpl.class);
try {
TaskAttemptImpl.RESOURCE_REQUEST_CACHE.clear();
logger.addAppender(testAppender);
EventHandler eventHandler = mock(EventHandler.class);
Clock clock = SystemClock.getInstance();
JobConf jobConf = new JobConf();
@ -1716,11 +1741,13 @@ public class TestTaskAttempt{
getResourceInfoFromContainerRequest(taImpl, eventHandler).
getMemorySize();
assertEquals(3072, memorySize);
assertTrue(logCapturer.getOutput().contains(
"Configuration " + "mapreduce.reduce.resource." + memoryName + "=3Gi is "
+ "overriding the mapreduce.reduce.memory.mb=2048 configuration"));
assertTrue(testAppender.getLogEvents().stream()
.anyMatch(e -> e.getLevel() == Level.WARN && ("Configuration " +
"mapreduce.reduce.resource." + memoryName + "=3Gi is " +
"overriding the mapreduce.reduce.memory.mb=2048 configuration")
.equals(e.getMessage())));
} finally {
logCapturer.stopCapturing();
logger.removeAppender(testAppender);
}
}
}
@ -1782,9 +1809,10 @@ public class TestTaskAttempt{
@Test
public void testReducerCpuRequestOverriding() {
final Logger logger = LoggerFactory.getLogger(TaskAttemptImpl.class);
final LogCapturer logCapturer = LogCapturer.captureLogs(logger);
TestAppender testAppender = new TestAppender();
final Logger logger = Logger.getLogger(TaskAttemptImpl.class);
try {
logger.addAppender(testAppender);
EventHandler eventHandler = mock(EventHandler.class);
Clock clock = SystemClock.getInstance();
JobConf jobConf = new JobConf();
@ -1797,11 +1825,13 @@ public class TestTaskAttempt{
getResourceInfoFromContainerRequest(taImpl, eventHandler).
getVirtualCores();
assertEquals(7, vCores);
assertTrue(logCapturer.getOutput().contains(
"Configuration " + "mapreduce.reduce.resource.vcores=7 is overriding the "
+ "mapreduce.reduce.cpu.vcores=9 configuration"));
assertTrue(testAppender.getLogEvents().stream().anyMatch(
e -> e.getLevel() == Level.WARN && ("Configuration " +
"mapreduce.reduce.resource.vcores=7 is overriding the " +
"mapreduce.reduce.cpu.vcores=9 configuration").equals(
e.getMessage())));
} finally {
logCapturer.stopCapturing();
logger.removeAppender(testAppender);
}
}

View File

@ -72,12 +72,6 @@
<artifactId>assertj-core</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-logging</artifactId>
<scope>test</scope>
<type>test-jar</type>
</dependency>
</dependencies>
<build>

View File

@ -23,10 +23,12 @@ import java.io.BufferedReader;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.Flushable;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
@ -42,13 +44,16 @@ import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SecureIOUtils;
import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.util.ProcessTree;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.util.concurrent.HadoopExecutors;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.log4j.Appender;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
@ -271,7 +276,42 @@ public class TaskLog {
}
// flush & close all appenders
HadoopLoggerUtils.shutdownLogManager();
LogManager.shutdown();
}
@SuppressWarnings("unchecked")
public static synchronized void syncLogs() {
// flush standard streams
//
System.out.flush();
System.err.flush();
// flush flushable appenders
//
final Logger rootLogger = Logger.getRootLogger();
flushAppenders(rootLogger);
final Enumeration<Logger> allLoggers = rootLogger.getLoggerRepository().
getCurrentLoggers();
while (allLoggers.hasMoreElements()) {
final Logger l = allLoggers.nextElement();
flushAppenders(l);
}
}
@SuppressWarnings("unchecked")
private static void flushAppenders(Logger l) {
final Enumeration<Appender> allAppenders = l.getAllAppenders();
while (allAppenders.hasMoreElements()) {
final Appender a = allAppenders.nextElement();
if (a instanceof Flushable) {
try {
((Flushable) a).flush();
} catch (IOException ioe) {
System.err.println(a + ": Failed to flush!"
+ StringUtils.stringifyException(ioe));
}
}
}
}
public static ScheduledExecutorService createLogSyncer() {
@ -296,7 +336,7 @@ public class TaskLog {
new Runnable() {
@Override
public void run() {
HadoopLoggerUtils.syncLogs();
TaskLog.syncLogs();
}
}, 0L, 5L, TimeUnit.SECONDS);
return scheduler;

View File

@ -28,19 +28,24 @@ import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.LineNumberReader;
import java.io.StringReader;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.mapred.TaskReport;
import org.apache.hadoop.mapreduce.JobStatus.State;
import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
import org.apache.log4j.Layout;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.WriterAppender;
import org.mockito.stubbing.Answer;
import org.slf4j.LoggerFactory;
/**
* Test to make sure that command line output for
@ -68,53 +73,55 @@ public class TestJobMonitorAndPrint {
@Test
public void testJobMonitorAndPrint() throws Exception {
LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger(Job.class));
try {
JobStatus jobStatus_1 =
new JobStatus(new JobID("job_000", 1), 1f, 0.1f, 0.1f, 0f, State.RUNNING,
JobPriority.HIGH, "tmp-user", "tmp-jobname", "tmp-queue", "tmp-jobfile", "tmp-url",
true);
JobStatus jobStatus_2 =
new JobStatus(new JobID("job_000", 1), 1f, 1f, 1f, 1f, State.SUCCEEDED, JobPriority.HIGH,
"tmp-user", "tmp-jobname", "tmp-queue", "tmp-jobfile", "tmp-url", true);
JobStatus jobStatus_1 = new JobStatus(new JobID("job_000", 1), 1f, 0.1f,
0.1f, 0f, State.RUNNING, JobPriority.HIGH, "tmp-user", "tmp-jobname",
"tmp-queue", "tmp-jobfile", "tmp-url", true);
JobStatus jobStatus_2 = new JobStatus(new JobID("job_000", 1), 1f, 1f,
1f, 1f, State.SUCCEEDED, JobPriority.HIGH, "tmp-user", "tmp-jobname",
"tmp-queue", "tmp-jobfile", "tmp-url", true);
doAnswer((Answer<TaskCompletionEvent[]>) invocation -> TaskCompletionEvent.EMPTY_ARRAY).when(
job).getTaskCompletionEvents(anyInt(), anyInt());
doAnswer((Answer<TaskCompletionEvent[]>) invocation ->
TaskCompletionEvent.EMPTY_ARRAY).when(job)
.getTaskCompletionEvents(anyInt(), anyInt());
doReturn(new TaskReport[5]).when(job).getTaskReports(isA(TaskType.class));
when(clientProtocol.getJobStatus(any(JobID.class))).thenReturn(jobStatus_1, jobStatus_2);
doReturn(new TaskReport[5]).when(job).getTaskReports(isA(TaskType.class));
when(clientProtocol.getJobStatus(any(JobID.class))).thenReturn(jobStatus_1, jobStatus_2);
// setup the logger to capture all logs
Layout layout =
Logger.getRootLogger().getAppender("stdout").getLayout();
ByteArrayOutputStream os = new ByteArrayOutputStream();
WriterAppender appender = new WriterAppender(layout, os);
appender.setThreshold(Level.ALL);
Logger qlogger = Logger.getLogger(Job.class);
qlogger.addAppender(appender);
job.monitorAndPrintJob();
job.monitorAndPrintJob();
boolean foundHundred = false;
boolean foundComplete = false;
boolean foundUber = false;
String uberModeMatch = "uber mode : true";
String progressMatch = "map 100% reduce 100%";
String completionMatch = "completed successfully";
for (String logLine : logCapturer.getOutput().split("\n")) {
if (logLine.contains(uberModeMatch)) {
foundUber = true;
}
if (logLine.contains(progressMatch)) {
foundHundred = true;
}
if (logLine.contains(completionMatch)) {
foundComplete = true;
}
if (foundUber && foundHundred && foundComplete) {
break;
}
qlogger.removeAppender(appender);
LineNumberReader r = new LineNumberReader(new StringReader(os.toString()));
String line;
boolean foundHundred = false;
boolean foundComplete = false;
boolean foundUber = false;
String uberModeMatch = "uber mode : true";
String progressMatch = "map 100% reduce 100%";
String completionMatch = "completed successfully";
while ((line = r.readLine()) != null) {
if (line.contains(uberModeMatch)) {
foundUber = true;
}
assertTrue(foundUber);
assertTrue(foundHundred);
assertTrue(foundComplete);
System.out.println("The output of job.toString() is : \n" + job.toString());
assertTrue(job.toString().contains("Number of maps: 5\n"));
assertTrue(job.toString().contains("Number of reduces: 5\n"));
} finally {
logCapturer.stopCapturing();
foundHundred = line.contains(progressMatch);
if (foundHundred)
break;
}
line = r.readLine();
foundComplete = line.contains(completionMatch);
assertTrue(foundUber);
assertTrue(foundHundred);
assertTrue(foundComplete);
System.out.println("The output of job.toString() is : \n" + job.toString());
assertTrue(job.toString().contains("Number of maps: 5\n"));
assertTrue(job.toString().contains("Number of reduces: 5\n"));
}
}

View File

@ -128,12 +128,6 @@
<artifactId>assertj-core</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-logging</artifactId>
<scope>test</scope>
<type>test-jar</type>
</dependency>
</dependencies>
<profiles>

View File

@ -34,6 +34,7 @@ import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
@ -44,6 +45,7 @@ import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CopyOnWriteArrayList;
import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap;
import org.apache.hadoop.conf.Configuration;
@ -53,7 +55,6 @@ import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.JobPriority;
import org.apache.hadoop.mapreduce.JobStatus.State;
@ -109,6 +110,13 @@ import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.Records;
import org.apache.hadoop.yarn.util.resource.CustomResourceTypesConfigurationProvider;
import org.apache.hadoop.yarn.util.resource.ResourceUtils;
import org.apache.log4j.Appender;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Layout;
import org.apache.log4j.Level;
import org.apache.log4j.SimpleLayout;
import org.apache.log4j.WriterAppender;
import org.apache.log4j.spi.LoggingEvent;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@ -136,6 +144,29 @@ public class TestYARNRunner {
MRJobConfig.DEFAULT_TASK_PROFILE_PARAMS.lastIndexOf("%"));
private static final String CUSTOM_RESOURCE_NAME = "a-custom-resource";
private static class TestAppender extends AppenderSkeleton {
private final List<LoggingEvent> logEvents = new CopyOnWriteArrayList<>();
@Override
public boolean requiresLayout() {
return false;
}
@Override
public void close() {
}
@Override
protected void append(LoggingEvent arg0) {
logEvents.add(arg0);
}
private List<LoggingEvent> getLogEvents() {
return logEvents;
}
}
private YARNRunner yarnRunner;
private ResourceMgrDelegate resourceMgrDelegate;
private YarnConfiguration conf;
@ -518,48 +549,38 @@ public class TestYARNRunner {
assertTrue("AM admin command opts is after user command opts.", adminIndex < userIndex);
}
}
@Test(timeout=20000)
public void testWarnCommandOpts() throws Exception {
LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger(YARNRunner.class));
try {
JobConf jobConf = new JobConf();
jobConf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS,
"-Djava.net.preferIPv4Stack=true -Djava.library.path=foo");
jobConf.set(MRJobConfig.MR_AM_COMMAND_OPTS, "-Xmx1024m -Djava.library.path=bar");
YARNRunner yarnRunner = new YARNRunner(jobConf);
@SuppressWarnings("unused")
ApplicationSubmissionContext submissionContext = buildSubmitContext(yarnRunner, jobConf);
boolean isFoundOne = false;
boolean isFoundTwo = false;
for (String logLine : logCapturer.getOutput().split("\n")) {
if (logLine == null) {
continue;
}
if (logLine.contains("WARN") && logLine.contains("Usage of -Djava.library.path in "
+ "yarn.app.mapreduce.am.admin-command-opts can cause programs to no "
+ "longer function if hadoop native libraries are used. These values "
+ "should be set as part of the LD_LIBRARY_PATH in the app master JVM "
+ "env using yarn.app.mapreduce.am.admin.user.env config settings.")) {
isFoundOne = true;
}
if (logLine.contains("WARN") && logLine.contains("Usage of -Djava.library.path in "
+ "yarn.app.mapreduce.am.command-opts can cause programs to no longer "
+ "function if hadoop native libraries are used. These values should "
+ "be set as part of the LD_LIBRARY_PATH in the app master JVM env "
+ "using yarn.app.mapreduce.am.env config settings.")) {
isFoundTwo = true;
}
}
assertTrue(isFoundOne);
assertTrue(isFoundTwo);
} finally {
logCapturer.stopCapturing();
}
org.apache.log4j.Logger logger =
org.apache.log4j.Logger.getLogger(YARNRunner.class);
ByteArrayOutputStream bout = new ByteArrayOutputStream();
Layout layout = new SimpleLayout();
Appender appender = new WriterAppender(layout, bout);
logger.addAppender(appender);
JobConf jobConf = new JobConf();
jobConf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS, "-Djava.net.preferIPv4Stack=true -Djava.library.path=foo");
jobConf.set(MRJobConfig.MR_AM_COMMAND_OPTS, "-Xmx1024m -Djava.library.path=bar");
YARNRunner yarnRunner = new YARNRunner(jobConf);
@SuppressWarnings("unused")
ApplicationSubmissionContext submissionContext =
buildSubmitContext(yarnRunner, jobConf);
String logMsg = bout.toString();
assertTrue(logMsg.contains("WARN - Usage of -Djava.library.path in " +
"yarn.app.mapreduce.am.admin-command-opts can cause programs to no " +
"longer function if hadoop native libraries are used. These values " +
"should be set as part of the LD_LIBRARY_PATH in the app master JVM " +
"env using yarn.app.mapreduce.am.admin.user.env config settings."));
assertTrue(logMsg.contains("WARN - Usage of -Djava.library.path in " +
"yarn.app.mapreduce.am.command-opts can cause programs to no longer " +
"function if hadoop native libraries are used. These values should " +
"be set as part of the LD_LIBRARY_PATH in the app master JVM env " +
"using yarn.app.mapreduce.am.env config settings."));
}
@Test(timeout=20000)
@ -975,7 +996,10 @@ public class TestYARNRunner {
for (String memoryName : ImmutableList.of(
MRJobConfig.RESOURCE_TYPE_NAME_MEMORY,
MRJobConfig.RESOURCE_TYPE_ALTERNATIVE_NAME_MEMORY)) {
LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger(YARNRunner.class));
TestAppender testAppender = new TestAppender();
org.apache.log4j.Logger logger =
org.apache.log4j.Logger.getLogger(YARNRunner.class);
logger.addAppender(testAppender);
try {
JobConf jobConf = new JobConf();
jobConf.set(MRJobConfig.MR_AM_RESOURCE_PREFIX + memoryName, "3 Gi");
@ -993,17 +1017,13 @@ public class TestYARNRunner {
long memorySize = resourceRequest.getCapability().getMemorySize();
Assert.assertEquals(3072, memorySize);
boolean isLogFound = false;
for (String logLine : logCapturer.getOutput().split("\n")) {
if (logLine != null && logLine.contains("WARN") && logLine.contains(
"Configuration " + "yarn.app.mapreduce.am.resource." + memoryName + "=3Gi is "
+ "overriding the yarn.app.mapreduce.am.resource.mb=2048 " + "configuration")) {
isLogFound = true;
}
}
assertTrue("Log line could not be found", isLogFound);
assertTrue(testAppender.getLogEvents().stream().anyMatch(
e -> e.getLevel() == Level.WARN && ("Configuration " +
"yarn.app.mapreduce.am.resource." + memoryName + "=3Gi is " +
"overriding the yarn.app.mapreduce.am.resource.mb=2048 " +
"configuration").equals(e.getMessage())));
} finally {
logCapturer.stopCapturing();
logger.removeAppender(testAppender);
}
}
}

View File

@ -29,6 +29,8 @@ import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.HadoopTestCase;
import org.apache.hadoop.mapred.JobConf;
import org.apache.log4j.Level;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
@ -74,10 +76,12 @@ public class TestChild extends HadoopTestCase {
mapJavaOpts,
mapJavaOpts, MAP_OPTS_VAL);
}
String logLevel = conf.get(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, "INFO");
assertEquals(JobConf.MAPRED_MAP_TASK_LOG_LEVEL + "has value of " + logLevel, logLevel,
"OFF");
Level logLevel =
Level.toLevel(conf.get(JobConf.MAPRED_MAP_TASK_LOG_LEVEL,
Level.INFO.toString()));
assertEquals(JobConf.MAPRED_MAP_TASK_LOG_LEVEL + "has value of " +
logLevel, logLevel, Level.OFF);
}
}
@ -104,10 +108,12 @@ public class TestChild extends HadoopTestCase {
reduceJavaOpts,
reduceJavaOpts, REDUCE_OPTS_VAL);
}
String logLevel = conf.get(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, "INFO");
assertEquals(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL + "has value of " + logLevel, logLevel,
"OFF");
Level logLevel =
Level.toLevel(conf.get(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL,
Level.INFO.toString()));
assertEquals(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL + "has value of " +
logLevel, logLevel, Level.OFF);
}
}
@ -121,9 +127,9 @@ public class TestChild extends HadoopTestCase {
conf.set(JobConf.MAPRED_MAP_TASK_JAVA_OPTS, MAP_OPTS_VAL);
conf.set(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, REDUCE_OPTS_VAL);
}
conf.set(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, "OFF");
conf.set(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, "OFF");
conf.set(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, Level.OFF.toString());
conf.set(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, Level.OFF.toString());
Job job = MapReduceTestUtil.createJob(conf, inDir, outDir,
numMaps, numReds);

View File

@ -25,7 +25,6 @@ import java.net.InetSocketAddress;
import java.security.PrivilegedAction;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.test.LambdaTestUtils;
import org.junit.Assert;
@ -51,6 +50,8 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Records;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -63,7 +64,8 @@ public class TestJHSSecurity {
@Test
public void testDelegationToken() throws Exception {
HadoopLoggerUtils.setLogLevel("root", "DEBUG");
org.apache.log4j.Logger rootLogger = LogManager.getRootLogger();
rootLogger.setLevel(Level.DEBUG);
final YarnConfiguration conf = new YarnConfiguration(new JobConf());
// Just a random principle

View File

@ -99,6 +99,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Capacity
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.WorkflowPriorityMappingsManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.WorkflowPriorityMappingsManager.WorkflowPriorityMapping;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
@ -556,9 +557,9 @@ public class TestMRJobs {
systemClasses);
}
sleepConf.set(MRJobConfig.IO_SORT_MB, TEST_IO_SORT_MB);
sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, "ALL");
sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, "ALL");
sleepConf.set(MRJobConfig.REDUCE_LOG_LEVEL, "ALL");
sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, Level.ALL.toString());
sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString());
sleepConf.set(MRJobConfig.REDUCE_LOG_LEVEL, Level.ALL.toString());
sleepConf.set(MRJobConfig.MAP_JAVA_OPTS, "-verbose:class");
final SleepJob sleepJob = new SleepJob();
sleepJob.setConf(sleepConf);
@ -855,11 +856,11 @@ public class TestMRJobs {
final SleepJob sleepJob = new SleepJob();
final JobConf sleepConf = new JobConf(mrCluster.getConfig());
sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, "ALL");
sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString());
final long userLogKb = 4;
sleepConf.setLong(MRJobConfig.TASK_USERLOG_LIMIT, userLogKb);
sleepConf.setInt(MRJobConfig.TASK_LOG_BACKUPS, 3);
sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, "ALL");
sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, Level.ALL.toString());
final long amLogKb = 7;
sleepConf.setLong(MRJobConfig.MR_AM_LOG_KB, amLogKb);
sleepConf.setInt(MRJobConfig.MR_AM_LOG_BACKUPS, 7);

View File

@ -1944,18 +1944,6 @@
<artifactId>log4j-web</artifactId>
<version>${log4j2.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-logging</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-logging</artifactId>
<version>${hadoop.version}</version>
<scope>test</scope>
<type>test-jar</type>
</dependency>
</dependencies>
</dependencyManagement>

View File

@ -349,12 +349,7 @@
<artifactId>hamcrest-library</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-logging</artifactId>
<scope>test</scope>
<type>test-jar</type>
</dependency>
</dependencies>
<profiles>

View File

@ -32,7 +32,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.NativeAzureFileSystem.FolderRenamePending;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;

View File

@ -23,7 +23,7 @@ import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.junit.Test;
import org.slf4j.Logger;

View File

@ -81,12 +81,6 @@
<artifactId>hadoop-hdfs-client</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-logging</artifactId>
<scope>test</scope>
<type>test-jar</type>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>

View File

@ -41,7 +41,6 @@ import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.tools.CopyListingFileStatus;
import org.apache.hadoop.tools.DistCp;
import org.apache.hadoop.tools.DistCpConstants;
@ -702,8 +701,8 @@ public abstract class AbstractContractDistCpTest
GenericTestUtils
.createFiles(remoteFS, source, getDepth(), getWidth(), getWidth());
LogCapturer log =
LogCapturer.captureLogs(SimpleCopyListing.LOG);
GenericTestUtils.LogCapturer log =
GenericTestUtils.LogCapturer.captureLogs(SimpleCopyListing.LOG);
String options = "-useiterator -update -delete" + getDefaultCLIOptions();
DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(),

View File

@ -27,10 +27,11 @@ import java.util.regex.Pattern;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.tools.rumen.datatypes.*;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
/**
* A default parser for MapReduce job configuration properties.
@ -82,7 +83,7 @@ public class MapReduceJobPropertiesParser implements JobPropertyParser {
// turn off the warning w.r.t deprecated mapreduce keys
static {
HadoopLoggerUtils.setLogLevel(Configuration.class.getName(), "OFF");
Logger.getLogger(Configuration.class).setLevel(Level.OFF);
}
// Accepts a key if there is a corresponding key in the current mapreduce

View File

@ -576,6 +576,16 @@
<Bug pattern="SQL_PREPARED_STATEMENT_GENERATED_FROM_NONCONSTANT_STRING" />
</Match>
<!-- Following fields are used in ErrorsAndWarningsBlock, which is not a part of analysis of findbugs -->
<Match>
<Class name="org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender$Element" />
<Or>
<Field name="count" />
<Field name="timestampSeconds" />
</Or>
<Bug pattern="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD" />
</Match>
<Match>
<Class name="org.apache.hadoop.yarn.api.records.ResourceRequest" />
<Method name="equals" />

View File

@ -63,7 +63,6 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
@ -127,6 +126,7 @@ import org.apache.hadoop.yarn.util.SystemClock;
import org.apache.hadoop.yarn.util.TimelineServiceHelper;
import org.apache.hadoop.yarn.util.resource.ResourceUtils;
import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
import org.apache.log4j.LogManager;
import org.apache.hadoop.classification.VisibleForTesting;
import com.sun.jersey.api.client.ClientHandlerException;
@ -403,7 +403,7 @@ public class ApplicationMaster {
result = appMaster.finish();
} catch (Throwable t) {
LOG.error("Error running ApplicationMaster", t);
HadoopLoggerUtils.shutdownLogManager();
LogManager.shutdown();
ExitUtil.terminate(1, t);
} finally {
if (appMaster != null) {
@ -529,7 +529,7 @@ public class ApplicationMaster {
//Check whether customer log4j.properties file exists
if (fileExist(log4jPath)) {
try {
HadoopLoggerUtils.updateLog4jConfiguration(ApplicationMaster.class,
Log4jPropertyHelper.updateLog4jConfiguration(ApplicationMaster.class,
log4jPath);
} catch (Exception e) {
LOG.warn("Can not set up custom log4j properties. " + e);
