HADOOP-18206 Cleanup the commons-logging references and restrict its usage in future (#5315)

Viraj Jasani 2023-02-13 11:24:06 -08:00 committed by GitHub
parent 30f560554d
commit 90de1ff151
71 changed files with 223 additions and 532 deletions

View File

@ -250,7 +250,6 @@ commons-codec:commons-codec:1.11
commons-collections:commons-collections:3.2.2 commons-collections:commons-collections:3.2.2
commons-daemon:commons-daemon:1.0.13 commons-daemon:commons-daemon:1.0.13
commons-io:commons-io:2.8.0 commons-io:commons-io:2.8.0
commons-logging:commons-logging:1.1.3
commons-net:commons-net:3.9.0 commons-net:commons-net:3.9.0
de.ruedigermoeller:fst:2.50 de.ruedigermoeller:fst:2.50
io.grpc:grpc-api:1.26.0 io.grpc:grpc-api:1.26.0

View File

@ -180,11 +180,6 @@
<artifactId>jersey-server</artifactId> <artifactId>jersey-server</artifactId>
<scope>compile</scope> <scope>compile</scope>
</dependency> </dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<scope>compile</scope>
</dependency>
<dependency> <dependency>
<groupId>log4j</groupId> <groupId>log4j</groupId>
<artifactId>log4j</artifactId> <artifactId>log4j</artifactId>

View File

@ -32,7 +32,6 @@ import java.nio.file.StandardOpenOption;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
@ -246,30 +245,6 @@ public class IOUtils {
} }
} }
/**
* Close the Closeable objects and <b>ignore</b> any {@link Throwable} or
* null pointers. Must only be used for cleanup in exception handlers.
*
* @param log the log to record problems to at debug level. Can be null.
* @param closeables the objects to close
* @deprecated use {@link #cleanupWithLogger(Logger, java.io.Closeable...)}
* instead
*/
@Deprecated
public static void cleanup(Log log, java.io.Closeable... closeables) {
for (java.io.Closeable c : closeables) {
if (c != null) {
try {
c.close();
} catch(Throwable e) {
if (log != null && log.isDebugEnabled()) {
log.debug("Exception in closing " + c, e);
}
}
}
}
}
/** /**
* Close the Closeable objects and <b>ignore</b> any {@link Throwable} or * Close the Closeable objects and <b>ignore</b> any {@link Throwable} or
* null pointers. Must only be used for cleanup in exception handlers. * null pointers. Must only be used for cleanup in exception handlers.
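
The removed cleanup(Log, ...) overload has a direct replacement that IOUtils keeps, as its @deprecated tag already pointed out. A minimal call-site migration sketch (the wrapper class, path handling, and logger name here are illustrative, not part of this commit):

import java.io.FileInputStream;
import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class CleanupMigrationExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(CleanupMigrationExample.class);

  static void readQuietly(String path) {
    FileInputStream in = null;
    try {
      in = new FileInputStream(path);
      // ... use the stream ...
    } catch (Exception e) {
      LOG.warn("read failed", e);
    } finally {
      // Before this commit: IOUtils.cleanup(commonsLog, in);
      // After: the slf4j-based overload that remains in IOUtils.
      IOUtils.cleanupWithLogger(LOG, in);
    }
  }
}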

View File

@ -34,10 +34,6 @@ import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
@ -51,6 +47,8 @@ import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.ServletUtil; import org.apache.hadoop.util.ServletUtil;
import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
/** /**
* Change log level in runtime. * Change log level in runtime.
@ -340,22 +338,14 @@ public class LogLevel {
out.println(MARKER out.println(MARKER
+ "Submitted Class Name: <b>" + logName + "</b><br />"); + "Submitted Class Name: <b>" + logName + "</b><br />");
Log log = LogFactory.getLog(logName); Logger log = Logger.getLogger(logName);
out.println(MARKER out.println(MARKER
+ "Log Class: <b>" + log.getClass().getName() +"</b><br />"); + "Log Class: <b>" + log.getClass().getName() +"</b><br />");
if (level != null) { if (level != null) {
out.println(MARKER + "Submitted Level: <b>" + level + "</b><br />"); out.println(MARKER + "Submitted Level: <b>" + level + "</b><br />");
} }
if (log instanceof Log4JLogger) { process(log, level, out);
process(((Log4JLogger)log).getLogger(), level, out);
}
else if (log instanceof Jdk14Logger) {
process(((Jdk14Logger)log).getLogger(), level, out);
}
else {
out.println("Sorry, " + log.getClass() + " not supported.<br />");
}
} }
out.println(FORMS); out.println(FORMS);
@ -371,14 +361,14 @@ public class LogLevel {
+ "<input type='submit' value='Set Log Level' />" + "<input type='submit' value='Set Log Level' />"
+ "</form>"; + "</form>";
private static void process(org.apache.log4j.Logger log, String level, private static void process(Logger log, String level,
PrintWriter out) throws IOException { PrintWriter out) throws IOException {
if (level != null) { if (level != null) {
if (!level.equalsIgnoreCase(org.apache.log4j.Level.toLevel(level) if (!level.equalsIgnoreCase(Level.toLevel(level)
.toString())) { .toString())) {
out.println(MARKER + "Bad Level : <b>" + level + "</b><br />"); out.println(MARKER + "Bad Level : <b>" + level + "</b><br />");
} else { } else {
log.setLevel(org.apache.log4j.Level.toLevel(level)); log.setLevel(Level.toLevel(level));
out.println(MARKER + "Setting Level to " + level + " ...<br />"); out.println(MARKER + "Setting Level to " + level + " ...<br />");
} }
} }
@ -386,21 +376,5 @@ public class LogLevel {
+ "Effective Level: <b>" + log.getEffectiveLevel() + "</b><br />"); + "Effective Level: <b>" + log.getEffectiveLevel() + "</b><br />");
} }
private static void process(java.util.logging.Logger log, String level,
PrintWriter out) throws IOException {
if (level != null) {
String levelToUpperCase = level.toUpperCase();
try {
log.setLevel(java.util.logging.Level.parse(levelToUpperCase));
} catch (IllegalArgumentException e) {
out.println(MARKER + "Bad Level : <b>" + level + "</b><br />");
}
out.println(MARKER + "Setting Level to " + level + " ...<br />");
}
java.util.logging.Level lev;
for(; (lev = log.getLevel()) == null; log = log.getParent());
out.println(MARKER + "Effective Level: <b>" + lev + "</b><br />");
}
} }
} }
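
With commons-logging gone, the servlet no longer needs to sniff for Log4JLogger or Jdk14Logger wrappers: every submitted name resolves straight to a log4j logger. The surviving logic boils down to the pattern below (a standalone sketch, not the servlet code itself; log4j 1.x Level.toLevel falls back to DEBUG on unparseable input, which is what the equalsIgnoreCase round-trip check detects):

import org.apache.log4j.Level;
import org.apache.log4j.Logger;

public class SetLevelSketch {
  static String setLevel(String logName, String level) {
    Logger log = Logger.getLogger(logName); // any logger name works now
    Level parsed = Level.toLevel(level);    // DEBUG fallback on bad input
    if (!level.equalsIgnoreCase(parsed.toString())) {
      return "Bad Level : " + level;
    }
    log.setLevel(parsed);
    return "Effective Level: " + log.getEffectiveLevel();
  }
}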

View File

@ -21,7 +21,6 @@ package org.apache.hadoop.service;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.slf4j.Logger; import org.slf4j.Logger;
@ -75,9 +74,10 @@ public final class ServiceOperations {
* @param log the log to warn at * @param log the log to warn at
* @param service a service; may be null * @param service a service; may be null
* @return any exception that was caught; null if none was. * @return any exception that was caught; null if none was.
* @see ServiceOperations#stopQuietly(Service) * @deprecated to be removed with 3.4.0. Use {@link #stopQuietly(Logger, Service)} instead.
*/ */
public static Exception stopQuietly(Log log, Service service) { @Deprecated
public static Exception stopQuietly(org.apache.commons.logging.Log log, Service service) {
try { try {
stop(service); stop(service);
} catch (Exception e) { } catch (Exception e) {
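
The commons-logging overload is kept only for compatibility and now carries @Deprecated; new code passes an slf4j logger. A hypothetical call site:

import org.apache.hadoop.service.Service;
import org.apache.hadoop.service.ServiceOperations;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class StopQuietlyExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(StopQuietlyExample.class);

  static void shutdown(Service service) {
    // Before: ServiceOperations.stopQuietly(commonsLog, service); // deprecated
    Exception stopFailure = ServiceOperations.stopQuietly(LOG, service);
    if (stopFailure != null) {
      LOG.info("service stop raised {}", stopFailure.toString());
    }
  }
}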

View File

@ -1,78 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.commons.logging.Log;
import org.slf4j.Logger;
class LogAdapter {
private Log LOG;
private Logger LOGGER;
private LogAdapter(Log LOG) {
this.LOG = LOG;
}
private LogAdapter(Logger LOGGER) {
this.LOGGER = LOGGER;
}
/**
* @deprecated use {@link #create(Logger)} instead
*/
@Deprecated
public static LogAdapter create(Log LOG) {
return new LogAdapter(LOG);
}
public static LogAdapter create(Logger LOGGER) {
return new LogAdapter(LOGGER);
}
public void info(String msg) {
if (LOG != null) {
LOG.info(msg);
} else if (LOGGER != null) {
LOGGER.info(msg);
}
}
public void warn(String msg, Throwable t) {
if (LOG != null) {
LOG.warn(msg, t);
} else if (LOGGER != null) {
LOGGER.warn(msg, t);
}
}
public void debug(Throwable t) {
if (LOG != null) {
LOG.debug(t);
} else if (LOGGER != null) {
LOGGER.debug("", t);
}
}
public void error(String msg) {
if (LOG != null) {
LOG.error(msg);
} else if (LOGGER != null) {
LOGGER.error(msg);
}
}
}
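
With the commons-logging leg removed everywhere, this adapter had no remaining callers and is deleted outright; call sites now take org.slf4j.Logger directly. The one behavioral wrinkle it papered over was Log.debug(Throwable), which slf4j expresses with an explicit message argument. As a sketch:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class DirectSlf4jSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(DirectSlf4jSketch.class);

  static void report(Throwable t) {
    // commons-logging allowed log.debug(t); slf4j wants a message first,
    // which is exactly what LogAdapter.debug(Throwable) did internally.
    LOG.debug("Error: ", t);
  }
}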

View File

@ -36,7 +36,6 @@ import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentHashMap;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configurable;
@ -228,8 +227,10 @@ public class ReflectionUtils {
* @param log the logger that logs the stack trace * @param log the logger that logs the stack trace
* @param title a descriptive title for the call stacks * @param title a descriptive title for the call stacks
* @param minInterval the minimum time from the last * @param minInterval the minimum time from the last
* @deprecated to be removed with 3.4.0. Use {@link #logThreadInfo(Logger, String, long)} instead.
*/ */
public static void logThreadInfo(Log log, @Deprecated
public static void logThreadInfo(org.apache.commons.logging.Log log,
String title, String title,
long minInterval) { long minInterval) {
boolean dumpStack = false; boolean dumpStack = false;
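
As with stopQuietly above, only the signature changes for callers; a hypothetical migration:

import org.apache.hadoop.util.ReflectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ThreadInfoExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(ThreadInfoExample.class);

  static void dumpThreads() {
    // Before: ReflectionUtils.logThreadInfo(commonsLog, "stuck handler", 30);
    ReflectionUtils.logThreadInfo(LOG, "stuck handler", 30); // slf4j overload
  }
}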

View File

@ -18,10 +18,10 @@
package org.apache.hadoop.util; package org.apache.hadoop.util;
import org.slf4j.Logger;
import sun.misc.Signal; import sun.misc.Signal;
import sun.misc.SignalHandler; import sun.misc.SignalHandler;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
@ -42,11 +42,11 @@ public enum SignalLogger {
* Our signal handler. * Our signal handler.
*/ */
private static class Handler implements SignalHandler { private static class Handler implements SignalHandler {
final private LogAdapter LOG; final private Logger log;
final private SignalHandler prevHandler; final private SignalHandler prevHandler;
Handler(String name, LogAdapter LOG) { Handler(String name, Logger log) {
this.LOG = LOG; this.log = log;
prevHandler = Signal.handle(new Signal(name), this); prevHandler = Signal.handle(new Signal(name), this);
} }
@ -57,7 +57,7 @@ public enum SignalLogger {
*/ */
@Override @Override
public void handle(Signal signal) { public void handle(Signal signal) {
LOG.error("RECEIVED SIGNAL " + signal.getNumber() + log.error("RECEIVED SIGNAL " + signal.getNumber() +
": SIG" + signal.getName()); ": SIG" + signal.getName());
prevHandler.handle(signal); prevHandler.handle(signal);
} }
@ -66,13 +66,9 @@ public enum SignalLogger {
/** /**
* Register some signal handlers. * Register some signal handlers.
* *
* @param LOG The log4j logfile to use in the signal handlers. * @param log The log4j logfile to use in the signal handlers.
*/ */
public void register(final Log LOG) { public void register(final Logger log) {
register(LogAdapter.create(LOG));
}
void register(final LogAdapter LOG) {
if (registered) { if (registered) {
throw new IllegalStateException("Can't re-install the signal handlers."); throw new IllegalStateException("Can't re-install the signal handlers.");
} }
@ -83,15 +79,15 @@ public enum SignalLogger {
String separator = ""; String separator = "";
for (String signalName : SIGNALS) { for (String signalName : SIGNALS) {
try { try {
new Handler(signalName, LOG); new Handler(signalName, log);
bld.append(separator) bld.append(separator)
.append(signalName); .append(signalName);
separator = ", "; separator = ", ";
} catch (Exception e) { } catch (Exception e) {
LOG.debug(e); log.debug("Error: ", e);
} }
} }
bld.append("]"); bld.append("]");
LOG.info(bld.toString()); log.info(bld.toString());
} }
} }
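
register(Logger) is now the single public entry point, so the LogAdapter indirection disappears. Typical daemon startup, assuming an slf4j logger for the service:

import org.apache.commons.lang3.SystemUtils;
import org.apache.hadoop.util.SignalLogger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class SignalLoggerExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(SignalLoggerExample.class);

  public static void main(String[] args) {
    if (SystemUtils.IS_OS_UNIX) {
      // Installs handlers that log received signals, then chain to the
      // previous handler. Registering twice throws IllegalStateException,
      // as TestSignalLogger below verifies.
      SignalLogger.INSTANCE.register(LOG);
    }
  }
}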

View File

@ -740,42 +740,26 @@ public class StringUtils {
* Print a log message for starting up and shutting down * Print a log message for starting up and shutting down
* @param clazz the class of the server * @param clazz the class of the server
* @param args arguments * @param args arguments
* @param LOG the target log object * @param log the target log object
*/ */
public static void startupShutdownMessage(Class<?> clazz, String[] args, public static void startupShutdownMessage(Class<?> clazz, String[] args,
final org.apache.commons.logging.Log LOG) { final org.slf4j.Logger log) {
startupShutdownMessage(clazz, args, LogAdapter.create(LOG));
}
/**
* Print a log message for starting up and shutting down
* @param clazz the class of the server
* @param args arguments
* @param LOG the target log object
*/
public static void startupShutdownMessage(Class<?> clazz, String[] args,
final org.slf4j.Logger LOG) {
startupShutdownMessage(clazz, args, LogAdapter.create(LOG));
}
static void startupShutdownMessage(Class<?> clazz, String[] args,
final LogAdapter LOG) {
final String hostname = NetUtils.getHostname(); final String hostname = NetUtils.getHostname();
final String classname = clazz.getSimpleName(); final String classname = clazz.getSimpleName();
LOG.info(createStartupShutdownMessage(classname, hostname, args)); log.info(createStartupShutdownMessage(classname, hostname, args));
if (SystemUtils.IS_OS_UNIX) { if (SystemUtils.IS_OS_UNIX) {
try { try {
SignalLogger.INSTANCE.register(LOG); SignalLogger.INSTANCE.register(log);
} catch (Throwable t) { } catch (Throwable t) {
LOG.warn("failed to register any UNIX signal loggers: ", t); log.warn("failed to register any UNIX signal loggers: ", t);
} }
} }
ShutdownHookManager.get().addShutdownHook( ShutdownHookManager.get().addShutdownHook(
new Runnable() { new Runnable() {
@Override @Override
public void run() { public void run() {
LOG.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{ log.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{
"Shutting down " + classname + " at " + hostname})); "Shutting down " + classname + " at " + hostname}));
LogManager.shutdown(); LogManager.shutdown();
} }
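
Two of the three overloads collapse into one: the commons-logging variant and the package-private LogAdapter variant are gone, leaving only the slf4j entry point. A hypothetical daemon main():

import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class MyDaemon {
  private static final Logger LOG = LoggerFactory.getLogger(MyDaemon.class);

  public static void main(String[] args) {
    // Logs the STARTUP_MSG banner, registers UNIX signal loggers where
    // supported, and installs a SHUTDOWN_MSG hook, all through slf4j.
    StringUtils.startupShutdownMessage(MyDaemon.class, args, LOG);
  }
}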

View File

@ -25,8 +25,6 @@ import static org.junit.Assert.fail;
import java.io.IOException; import java.io.IOException;
import java.net.URI; import java.net.URI;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
@ -39,7 +37,8 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** /**
* *
@ -51,8 +50,8 @@ import org.junit.Test;
*/ */
public class TestViewFileSystemLocalFileSystem extends ViewFileSystemBaseTest { public class TestViewFileSystemLocalFileSystem extends ViewFileSystemBaseTest {
private static final Log LOG = private static final Logger LOG =
LogFactory.getLog(TestViewFileSystemLocalFileSystem.class); LoggerFactory.getLogger(TestViewFileSystemLocalFileSystem.class);
@Override @Override
@Before @Before

View File

@ -21,8 +21,6 @@ import java.io.IOException;
import java.net.URI; import java.net.URI;
import java.net.URISyntaxException; import java.net.URISyntaxException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
@ -35,6 +33,8 @@ import org.junit.After;
import org.junit.Assert; import org.junit.Assert;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** /**
* *
@ -43,8 +43,8 @@ import org.junit.Test;
*/ */
public class TestViewFileSystemOverloadSchemeLocalFileSystem { public class TestViewFileSystemOverloadSchemeLocalFileSystem {
private static final String FILE = "file"; private static final String FILE = "file";
private static final Log LOG = private static final Logger LOG =
LogFactory.getLog(TestViewFileSystemOverloadSchemeLocalFileSystem.class); LoggerFactory.getLogger(TestViewFileSystemOverloadSchemeLocalFileSystem.class);
private FileSystem fsTarget; private FileSystem fsTarget;
private Configuration conf; private Configuration conf;
private Path targetTestRoot; private Path targetTestRoot;

View File

@ -17,8 +17,6 @@
*/ */
package org.apache.hadoop.http; package org.apache.hadoop.http;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.minikdc.MiniKdc;
@ -53,8 +51,6 @@ import static org.junit.Assert.assertTrue;
*/ */
public class TestHttpServerWithSpnego { public class TestHttpServerWithSpnego {
static final Log LOG = LogFactory.getLog(TestHttpServerWithSpnego.class);
private static final String SECRET_STR = "secret"; private static final String SECRET_STR = "secret";
private static final String HTTP_USER = "HTTP"; private static final String HTTP_USER = "HTTP";
private static final String PREFIX = "hadoop.http.authentication."; private static final String PREFIX = "hadoop.http.authentication.";

View File

@ -22,8 +22,6 @@ import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ContainerNode; import com.fasterxml.jackson.databind.node.ContainerNode;
import org.junit.Test; import org.junit.Test;
import static org.junit.Assert.*; import static org.junit.Assert.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Time;
import org.apache.log4j.Appender; import org.apache.log4j.Appender;
import org.apache.log4j.Category; import org.apache.log4j.Category;
@ -44,8 +42,6 @@ import java.util.Vector;
public class TestLog4Json { public class TestLog4Json {
private static final Log LOG = LogFactory.getLog(TestLog4Json.class);
@Test @Test
public void testConstruction() throws Throwable { public void testConstruction() throws Throwable {
Log4Json l4j = new Log4Json(); Log4Json l4j = new Log4Json();

View File

@ -22,9 +22,6 @@ import java.net.SocketException;
import java.net.URI; import java.net.URI;
import java.util.concurrent.Callable; import java.util.concurrent.Callable;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeys;
@ -70,8 +67,7 @@ public class TestLogLevel extends KerberosSecurityTestcase {
private final String logName = TestLogLevel.class.getName(); private final String logName = TestLogLevel.class.getName();
private String clientPrincipal; private String clientPrincipal;
private String serverPrincipal; private String serverPrincipal;
private final Log testlog = LogFactory.getLog(logName); private final Logger log = Logger.getLogger(logName);
private final Logger log = ((Log4JLogger)testlog).getLogger();
private final static String PRINCIPAL = "loglevel.principal"; private final static String PRINCIPAL = "loglevel.principal";
private final static String KEYTAB = "loglevel.keytab"; private final static String KEYTAB = "loglevel.keytab";
private static final String PREFIX = "hadoop.http.authentication."; private static final String PREFIX = "hadoop.http.authentication.";

View File

@ -49,8 +49,6 @@ import java.util.function.Supplier;
import java.util.regex.Pattern; import java.util.regex.Pattern;
import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
@ -117,29 +115,11 @@ public abstract class GenericTestUtils {
public static final String ERROR_INVALID_ARGUMENT = public static final String ERROR_INVALID_ARGUMENT =
"Total wait time should be greater than check interval time"; "Total wait time should be greater than check interval time";
/**
* @deprecated use {@link #disableLog(org.slf4j.Logger)} instead
*/
@Deprecated
@SuppressWarnings("unchecked")
public static void disableLog(Log log) {
// We expect that commons-logging is a wrapper around Log4j.
disableLog((Log4JLogger) log);
}
@Deprecated @Deprecated
public static Logger toLog4j(org.slf4j.Logger logger) { public static Logger toLog4j(org.slf4j.Logger logger) {
return LogManager.getLogger(logger.getName()); return LogManager.getLogger(logger.getName());
} }
/**
* @deprecated use {@link #disableLog(org.slf4j.Logger)} instead
*/
@Deprecated
public static void disableLog(Log4JLogger log) {
log.getLogger().setLevel(Level.OFF);
}
/** /**
* @deprecated use {@link #disableLog(org.slf4j.Logger)} instead * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead
*/ */
@ -152,45 +132,6 @@ public abstract class GenericTestUtils {
disableLog(toLog4j(logger)); disableLog(toLog4j(logger));
} }
/**
* @deprecated
* use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
*/
@Deprecated
@SuppressWarnings("unchecked")
public static void setLogLevel(Log log, Level level) {
// We expect that commons-logging is a wrapper around Log4j.
setLogLevel((Log4JLogger) log, level);
}
/**
* A helper used in log4j2 migration to accept legacy
* org.apache.commons.logging apis.
* <p>
* And will be removed after migration.
*
* @param log a log
* @param level level to be set
*/
@Deprecated
public static void setLogLevel(Log log, org.slf4j.event.Level level) {
setLogLevel(log, Level.toLevel(level.toString()));
}
/**
* @deprecated
* use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
*/
@Deprecated
public static void setLogLevel(Log4JLogger log, Level level) {
log.getLogger().setLevel(level);
}
/**
* @deprecated
* use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
*/
@Deprecated
public static void setLogLevel(Logger logger, Level level) { public static void setLogLevel(Logger logger, Level level) {
logger.setLevel(level); logger.setLevel(level);
} }
@ -535,13 +476,15 @@ public abstract class GenericTestUtils {
private WriterAppender appender; private WriterAppender appender;
private Logger logger; private Logger logger;
public static LogCapturer captureLogs(Log l) { public static LogCapturer captureLogs(org.slf4j.Logger logger) {
Logger logger = ((Log4JLogger)l).getLogger(); if (logger.getName().equals("root")) {
return new LogCapturer(logger); return new LogCapturer(org.apache.log4j.Logger.getRootLogger());
}
return new LogCapturer(toLog4j(logger));
} }
public static LogCapturer captureLogs(org.slf4j.Logger logger) { public static LogCapturer captureLogs(Logger logger) {
return new LogCapturer(toLog4j(logger)); return new LogCapturer(logger);
} }
private LogCapturer(Logger logger) { private LogCapturer(Logger logger) {
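
captureLogs(Log) is replaced by an slf4j entry point that bridges to the underlying log4j logger (with a special case for the root logger). A test-side usage sketch, assuming LogCapturer's existing getOutput/stopCapturing helpers:

import org.apache.hadoop.test.GenericTestUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogCapturerExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(LogCapturerExample.class);

  void demo() {
    GenericTestUtils.LogCapturer capturer =
        GenericTestUtils.LogCapturer.captureLogs(LOG);
    try {
      LOG.info("hello");
      // capturer.getOutput() now contains the formatted "hello" line.
    } finally {
      capturer.stopCapturing();
    }
  }
}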

View File

@ -18,10 +18,10 @@
package org.apache.hadoop.util; package org.apache.hadoop.util;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert; import org.junit.Assert;
import org.junit.Test; import org.junit.Test;
import org.slf4j.LoggerFactory;
import java.io.ByteArrayInputStream; import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream; import java.io.ByteArrayOutputStream;
@ -43,7 +43,7 @@ public class TestJarFinder {
public void testJar() throws Exception { public void testJar() throws Exception {
//picking a class that is for sure in a JAR in the classpath //picking a class that is for sure in a JAR in the classpath
String jar = JarFinder.getJar(LogFactory.class); String jar = JarFinder.getJar(LoggerFactory.class);
Assert.assertTrue(new File(jar).exists()); Assert.assertTrue(new File(jar).exists());
} }

View File

@ -32,9 +32,9 @@ public class TestSignalLogger {
@Test(timeout=60000) @Test(timeout=60000)
public void testInstall() throws Exception { public void testInstall() throws Exception {
Assume.assumeTrue(SystemUtils.IS_OS_UNIX); Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
SignalLogger.INSTANCE.register(LogAdapter.create(LOG)); SignalLogger.INSTANCE.register(LOG);
try { try {
SignalLogger.INSTANCE.register(LogAdapter.create(LOG)); SignalLogger.INSTANCE.register(LOG);
Assert.fail("expected IllegalStateException from double registration"); Assert.fail("expected IllegalStateException from double registration");
} catch (IllegalStateException e) { } catch (IllegalStateException e) {
// fall through // fall through

View File

@ -63,11 +63,6 @@
<artifactId>mockito-core</artifactId> <artifactId>mockito-core</artifactId>
<scope>test</scope> <scope>test</scope>
</dependency> </dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<scope>compile</scope>
</dependency>
<dependency> <dependency>
<groupId>javax.servlet</groupId> <groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId> <artifactId>javax.servlet-api</artifactId>

View File

@ -61,10 +61,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>hadoop-common</artifactId> <artifactId>hadoop-common</artifactId>
<scope>provided</scope> <scope>provided</scope>
<exclusions> <exclusions>
<exclusion>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</exclusion>
<exclusion> <exclusion>
<groupId>log4j</groupId> <groupId>log4j</groupId>
<artifactId>log4j</artifactId> <artifactId>log4j</artifactId>

View File

@ -133,11 +133,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>commons-io</artifactId> <artifactId>commons-io</artifactId>
<scope>compile</scope> <scope>compile</scope>
</dependency> </dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<scope>compile</scope>
</dependency>
<dependency> <dependency>
<groupId>commons-daemon</groupId> <groupId>commons-daemon</groupId>
<artifactId>commons-daemon</artifactId> <artifactId>commons-daemon</artifactId>

View File

@ -49,10 +49,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>hadoop-common</artifactId> <artifactId>hadoop-common</artifactId>
<scope>provided</scope> <scope>provided</scope>
<exclusions> <exclusions>
<exclusion>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</exclusion>
<exclusion> <exclusion>
<groupId>log4j</groupId> <groupId>log4j</groupId>
<artifactId>log4j</artifactId> <artifactId>log4j</artifactId>

View File

@ -2054,7 +2054,7 @@ public class TestRouterRpc {
@Test @Test
public void testMkdirsWithCallerContext() throws IOException { public void testMkdirsWithCallerContext() throws IOException {
GenericTestUtils.LogCapturer auditlog = GenericTestUtils.LogCapturer auditlog =
GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog); GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
// Current callerContext is null // Current callerContext is null
assertNull(CallerContext.getCurrent()); assertNull(CallerContext.getCurrent());
@ -2092,7 +2092,7 @@ public class TestRouterRpc {
@Test @Test
public void testAddClientIpPortToCallerContext() throws IOException { public void testAddClientIpPortToCallerContext() throws IOException {
GenericTestUtils.LogCapturer auditLog = GenericTestUtils.LogCapturer auditLog =
GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog); GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
// 1. ClientIp and ClientPort are not set on the client. // 1. ClientIp and ClientPort are not set on the client.
// Set client context. // Set client context.
@ -2127,7 +2127,7 @@ public class TestRouterRpc {
@Test @Test
public void testAddClientIdAndCallIdToCallerContext() throws IOException { public void testAddClientIdAndCallIdToCallerContext() throws IOException {
GenericTestUtils.LogCapturer auditLog = GenericTestUtils.LogCapturer auditLog =
GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog); GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
// 1. ClientId and ClientCallId are not set on the client. // 1. ClientId and ClientCallId are not set on the client.
// Set client context. // Set client context.

View File

@ -440,7 +440,7 @@ public class TestRouterRpcMultiDestination extends TestRouterRpc {
@Test @Test
public void testCallerContextWithMultiDestinations() throws IOException { public void testCallerContextWithMultiDestinations() throws IOException {
GenericTestUtils.LogCapturer auditLog = GenericTestUtils.LogCapturer auditLog =
GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog); GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
// set client context // set client context
CallerContext.setCurrent( CallerContext.setCurrent(

View File

@ -117,11 +117,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>commons-io</artifactId> <artifactId>commons-io</artifactId>
<scope>compile</scope> <scope>compile</scope>
</dependency> </dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<scope>compile</scope>
</dependency>
<dependency> <dependency>
<groupId>commons-daemon</groupId> <groupId>commons-daemon</groupId>
<artifactId>commons-daemon</artifactId> <artifactId>commons-daemon</artifactId>

View File

@ -31,8 +31,6 @@ import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException; import javax.management.MalformedObjectNameException;
import javax.management.ObjectName; import javax.management.ObjectName;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.impl.Log4JLogger;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.metrics2.util.MBeans;
@ -58,13 +56,12 @@ public class MetricsLoggerTask implements Runnable {
} }
} }
private Log metricsLog; private org.apache.log4j.Logger metricsLog;
private String nodeName; private String nodeName;
private short maxLogLineLength; private short maxLogLineLength;
public MetricsLoggerTask(Log metricsLog, String nodeName, public MetricsLoggerTask(String metricsLog, String nodeName, short maxLogLineLength) {
short maxLogLineLength) { this.metricsLog = org.apache.log4j.Logger.getLogger(metricsLog);
this.metricsLog = metricsLog;
this.nodeName = nodeName; this.nodeName = nodeName;
this.maxLogLineLength = maxLogLineLength; this.maxLogLineLength = maxLogLineLength;
} }
@ -118,13 +115,8 @@ public class MetricsLoggerTask implements Runnable {
.substring(0, maxLogLineLength) + "..."); .substring(0, maxLogLineLength) + "...");
} }
private static boolean hasAppenders(Log logger) { private static boolean hasAppenders(org.apache.log4j.Logger logger) {
if (!(logger instanceof Log4JLogger)) { return logger.getAllAppenders().hasMoreElements();
// Don't bother trying to determine the presence of appenders.
return true;
}
Log4JLogger log4JLogger = ((Log4JLogger) logger);
return log4JLogger.getLogger().getAllAppenders().hasMoreElements();
} }
/** /**
@ -150,13 +142,8 @@ public class MetricsLoggerTask implements Runnable {
* Make the metrics logger async and add all pre-existing appenders to the * Make the metrics logger async and add all pre-existing appenders to the
* async appender. * async appender.
*/ */
public static void makeMetricsLoggerAsync(Log metricsLog) { public static void makeMetricsLoggerAsync(String metricsLog) {
if (!(metricsLog instanceof Log4JLogger)) { org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(metricsLog);
LOG.warn("Metrics logging will not be async since "
+ "the logger is not log4j");
return;
}
org.apache.log4j.Logger logger = ((Log4JLogger) metricsLog).getLogger();
logger.setAdditivity(false); // Don't pollute actual logs with metrics dump logger.setAdditivity(false); // Don't pollute actual logs with metrics dump
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
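
The task is now constructed from a logger name rather than a Log handle, and resolves the log4j logger itself; the DataNode and NameNode hunks below wire it up the same way. A sketch of that wiring (the package of MetricsLoggerTask is an assumption here; the names match the hunks below):

import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdfs.server.common.MetricsLoggerTask; // assumed package

public class MetricsLoggingWiring {
  static ScheduledThreadPoolExecutor schedule(long periodSec) {
    // Wrap the named logger's appenders in an AsyncAppender first.
    MetricsLoggerTask.makeMetricsLoggerAsync("DataNodeMetricsLog");
    ScheduledThreadPoolExecutor timer = new ScheduledThreadPoolExecutor(1);
    timer.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
    timer.scheduleWithFixedDelay(
        new MetricsLoggerTask("DataNodeMetricsLog", "DataNode", (short) 0),
        periodSec, periodSec, TimeUnit.SECONDS);
    return timer;
  }
}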

View File

@ -35,7 +35,6 @@ import java.util.Queue;
import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicLong;
import java.util.zip.Checksum; import java.util.zip.Checksum;
import org.apache.commons.logging.Log;
import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FSOutputSummer; import org.apache.hadoop.fs.FSOutputSummer;
import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.fs.StorageType;
@ -73,7 +72,7 @@ import org.slf4j.Logger;
**/ **/
class BlockReceiver implements Closeable { class BlockReceiver implements Closeable {
public static final Logger LOG = DataNode.LOG; public static final Logger LOG = DataNode.LOG;
static final Log ClientTraceLog = DataNode.ClientTraceLog; static final Logger CLIENT_TRACE_LOG = DataNode.CLIENT_TRACE_LOG;
@VisibleForTesting @VisibleForTesting
static long CACHE_DROP_LAG_BYTES = 8 * 1024 * 1024; static long CACHE_DROP_LAG_BYTES = 8 * 1024 * 1024;
@ -1398,7 +1397,7 @@ class BlockReceiver implements Closeable {
public void run() { public void run() {
datanode.metrics.incrDataNodePacketResponderCount(); datanode.metrics.incrDataNodePacketResponderCount();
boolean lastPacketInBlock = false; boolean lastPacketInBlock = false;
final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0; final long startTime = CLIENT_TRACE_LOG.isInfoEnabled() ? System.nanoTime() : 0;
while (isRunning() && !lastPacketInBlock) { while (isRunning() && !lastPacketInBlock) {
long totalAckTimeNanos = 0; long totalAckTimeNanos = 0;
boolean isInterrupted = false; boolean isInterrupted = false;
@ -1553,7 +1552,7 @@ class BlockReceiver implements Closeable {
// Hold a volume reference to finalize block. // Hold a volume reference to finalize block.
try (ReplicaHandler handler = BlockReceiver.this.claimReplicaHandler()) { try (ReplicaHandler handler = BlockReceiver.this.claimReplicaHandler()) {
BlockReceiver.this.close(); BlockReceiver.this.close();
endTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0; endTime = CLIENT_TRACE_LOG.isInfoEnabled() ? System.nanoTime() : 0;
block.setNumBytes(replicaInfo.getNumBytes()); block.setNumBytes(replicaInfo.getNumBytes());
datanode.data.finalizeBlock(block, dirSyncOnFinalize); datanode.data.finalizeBlock(block, dirSyncOnFinalize);
} }
@ -1564,11 +1563,11 @@ class BlockReceiver implements Closeable {
datanode.closeBlock(block, null, replicaInfo.getStorageUuid(), datanode.closeBlock(block, null, replicaInfo.getStorageUuid(),
replicaInfo.isOnTransientStorage()); replicaInfo.isOnTransientStorage());
if (ClientTraceLog.isInfoEnabled() && isClient) { if (CLIENT_TRACE_LOG.isInfoEnabled() && isClient) {
long offset = 0; long offset = 0;
DatanodeRegistration dnR = datanode.getDNRegistrationForBP(block DatanodeRegistration dnR = datanode.getDNRegistrationForBP(block
.getBlockPoolId()); .getBlockPoolId());
ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT, inAddr, CLIENT_TRACE_LOG.info(String.format(DN_CLIENTTRACE_FORMAT, inAddr,
myAddr, replicaInfo.getVolume(), block.getNumBytes(), myAddr, replicaInfo.getVolume(), block.getNumBytes(),
"HDFS_WRITE", clientname, offset, dnR.getDatanodeUuid(), "HDFS_WRITE", clientname, offset, dnR.getDatanodeUuid(),
block, endTime - startTime)); block, endTime - startTime));

View File

@ -32,7 +32,6 @@ import java.nio.channels.FileChannel;
import java.util.Arrays; import java.util.Arrays;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FsTracer; import org.apache.hadoop.fs.FsTracer;
import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.DFSUtilClient;
@ -103,7 +102,7 @@ import org.slf4j.Logger;
*/ */
class BlockSender implements java.io.Closeable { class BlockSender implements java.io.Closeable {
static final Logger LOG = DataNode.LOG; static final Logger LOG = DataNode.LOG;
static final Log ClientTraceLog = DataNode.ClientTraceLog; static final Logger CLIENT_TRACE_LOG = DataNode.CLIENT_TRACE_LOG;
private static final boolean is32Bit = private static final boolean is32Bit =
System.getProperty("sun.arch.data.model").equals("32"); System.getProperty("sun.arch.data.model").equals("32");
/** /**
@ -784,7 +783,7 @@ class BlockSender implements java.io.Closeable {
// Trigger readahead of beginning of file if configured. // Trigger readahead of beginning of file if configured.
manageOsCache(); manageOsCache();
final long startTime = ClientTraceLog.isDebugEnabled() ? System.nanoTime() : 0; final long startTime = CLIENT_TRACE_LOG.isDebugEnabled() ? System.nanoTime() : 0;
try { try {
int maxChunksPerPacket; int maxChunksPerPacket;
int pktBufSize = PacketHeader.PKT_MAX_HEADER_LEN; int pktBufSize = PacketHeader.PKT_MAX_HEADER_LEN;
@ -831,9 +830,9 @@ class BlockSender implements java.io.Closeable {
sentEntireByteRange = true; sentEntireByteRange = true;
} }
} finally { } finally {
if ((clientTraceFmt != null) && ClientTraceLog.isDebugEnabled()) { if ((clientTraceFmt != null) && CLIENT_TRACE_LOG.isDebugEnabled()) {
final long endTime = System.nanoTime(); final long endTime = System.nanoTime();
ClientTraceLog.debug(String.format(clientTraceFmt, totalRead, CLIENT_TRACE_LOG.debug(String.format(clientTraceFmt, totalRead,
initialOffset, endTime - startTime)); initialOffset, endTime - startTime));
} }
close(); close();
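
The rename to CLIENT_TRACE_LOG comes with a switch to slf4j, but the isInfoEnabled/isDebugEnabled guards stay: these trace lines are built eagerly with String.format rather than slf4j {} templates, so skipping the formatting cost still needs an explicit check. The idiom, as a standalone sketch:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ClientTraceSketch {
  private static final Logger CLIENT_TRACE_LOG =
      LoggerFactory.getLogger(ClientTraceSketch.class.getName() + ".clienttrace");

  static void trace(String fmt, long totalRead, long offset, long durationNs) {
    if (CLIENT_TRACE_LOG.isDebugEnabled()) {
      // String.format runs eagerly, so the guard avoids the formatting
      // work when the clienttrace logger is quiet.
      CLIENT_TRACE_LOG.debug(String.format(fmt, totalRead, offset, durationNs));
    }
  }
}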

View File

@ -140,8 +140,6 @@ import javax.annotation.Nullable;
import javax.management.ObjectName; import javax.management.ObjectName;
import javax.net.SocketFactory; import javax.net.SocketFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
@ -319,8 +317,8 @@ public class DataNode extends ReconfigurableBase
", blockid: %s" + // block id ", blockid: %s" + // block id
", duration(ns): %s"; // duration time ", duration(ns): %s"; // duration time
static final Log ClientTraceLog = static final Logger CLIENT_TRACE_LOG =
LogFactory.getLog(DataNode.class.getName() + ".clienttrace"); LoggerFactory.getLogger(DataNode.class.getName() + ".clienttrace");
private static final String USAGE = private static final String USAGE =
"Usage: hdfs datanode [-regular | -rollback | -rollingupgrade rollback" + "Usage: hdfs datanode [-regular | -rollback | -rollingupgrade rollback" +
@ -360,7 +358,7 @@ public class DataNode extends ReconfigurableBase
FS_GETSPACEUSED_JITTER_KEY, FS_GETSPACEUSED_JITTER_KEY,
FS_GETSPACEUSED_CLASSNAME)); FS_GETSPACEUSED_CLASSNAME));
public static final Log METRICS_LOG = LogFactory.getLog("DataNodeMetricsLog"); public static final String METRICS_LOG_NAME = "DataNodeMetricsLog";
private static final String DATANODE_HTRACE_PREFIX = "datanode.htrace."; private static final String DATANODE_HTRACE_PREFIX = "datanode.htrace.";
private final FileIoProvider fileIoProvider; private final FileIoProvider fileIoProvider;
@ -4060,12 +4058,12 @@ public class DataNode extends ReconfigurableBase
return; return;
} }
MetricsLoggerTask.makeMetricsLoggerAsync(METRICS_LOG); MetricsLoggerTask.makeMetricsLoggerAsync(METRICS_LOG_NAME);
// Schedule the periodic logging. // Schedule the periodic logging.
metricsLoggerTimer = new ScheduledThreadPoolExecutor(1); metricsLoggerTimer = new ScheduledThreadPoolExecutor(1);
metricsLoggerTimer.setExecuteExistingDelayedTasksAfterShutdownPolicy(false); metricsLoggerTimer.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
metricsLoggerTimer.scheduleWithFixedDelay(new MetricsLoggerTask(METRICS_LOG, metricsLoggerTimer.scheduleWithFixedDelay(new MetricsLoggerTask(METRICS_LOG_NAME,
"DataNode", (short) 0), metricsLoggerPeriodSec, metricsLoggerPeriodSec, "DataNode", (short) 0), metricsLoggerPeriodSec, metricsLoggerPeriodSec,
TimeUnit.SECONDS); TimeUnit.SECONDS);
} }

View File

@ -18,8 +18,6 @@
package org.apache.hadoop.hdfs.server.datanode; package org.apache.hadoop.hdfs.server.datanode;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.common.AutoCloseDataSetLock; import org.apache.hadoop.hdfs.server.common.AutoCloseDataSetLock;
@ -29,11 +27,14 @@ import java.util.HashMap;
import java.util.Stack; import java.util.Stack;
import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** /**
* Class for maintain a set of lock for fsDataSetImpl. * Class for maintain a set of lock for fsDataSetImpl.
*/ */
public class DataSetLockManager implements DataNodeLockManager<AutoCloseDataSetLock> { public class DataSetLockManager implements DataNodeLockManager<AutoCloseDataSetLock> {
public static final Log LOG = LogFactory.getLog(DataSetLockManager.class); public static final Logger LOG = LoggerFactory.getLogger(DataSetLockManager.class);
private final HashMap<String, TrackLog> threadCountMap = new HashMap<>(); private final HashMap<String, TrackLog> threadCountMap = new HashMap<>();
private final LockMap lockMap = new LockMap(); private final LockMap lockMap = new LockMap();
private boolean isFair = true; private boolean isFair = true;

View File

@ -21,7 +21,6 @@ import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.protobuf.ByteString; import org.apache.hadoop.thirdparty.protobuf.ByteString;
import javax.crypto.SecretKey; import javax.crypto.SecretKey;
import org.apache.commons.logging.Log;
import org.apache.hadoop.fs.FsTracer; import org.apache.hadoop.fs.FsTracer;
import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.DFSUtilClient;
@ -105,7 +104,7 @@ import static org.apache.hadoop.util.Time.monotonicNow;
*/ */
class DataXceiver extends Receiver implements Runnable { class DataXceiver extends Receiver implements Runnable {
public static final Logger LOG = DataNode.LOG; public static final Logger LOG = DataNode.LOG;
static final Log ClientTraceLog = DataNode.ClientTraceLog; static final Logger CLIENT_TRACE_LOG = DataNode.CLIENT_TRACE_LOG;
private Peer peer; private Peer peer;
private final String remoteAddress; // address of remote side private final String remoteAddress; // address of remote side
@ -426,10 +425,10 @@ class DataXceiver extends Receiver implements Runnable {
registeredSlotId); registeredSlotId);
datanode.shortCircuitRegistry.unregisterSlot(registeredSlotId); datanode.shortCircuitRegistry.unregisterSlot(registeredSlotId);
} }
if (ClientTraceLog.isInfoEnabled()) { if (CLIENT_TRACE_LOG.isInfoEnabled()) {
DatanodeRegistration dnR = datanode.getDNRegistrationForBP(blk DatanodeRegistration dnR = datanode.getDNRegistrationForBP(blk
.getBlockPoolId()); .getBlockPoolId());
BlockSender.ClientTraceLog.info(String.format( BlockSender.CLIENT_TRACE_LOG.info(String.format(
"src: 127.0.0.1, dest: 127.0.0.1, op: REQUEST_SHORT_CIRCUIT_FDS," + "src: 127.0.0.1, dest: 127.0.0.1, op: REQUEST_SHORT_CIRCUIT_FDS," +
" blockid: %s, srvID: %s, success: %b", " blockid: %s, srvID: %s, success: %b",
blk.getBlockId(), dnR.getDatanodeUuid(), success)); blk.getBlockId(), dnR.getDatanodeUuid(), success));
@ -466,8 +465,8 @@ class DataXceiver extends Receiver implements Runnable {
bld.build().writeDelimitedTo(socketOut); bld.build().writeDelimitedTo(socketOut);
success = true; success = true;
} finally { } finally {
if (ClientTraceLog.isInfoEnabled()) { if (CLIENT_TRACE_LOG.isInfoEnabled()) {
BlockSender.ClientTraceLog.info(String.format( BlockSender.CLIENT_TRACE_LOG.info(String.format(
"src: 127.0.0.1, dest: 127.0.0.1, op: RELEASE_SHORT_CIRCUIT_FDS," + "src: 127.0.0.1, dest: 127.0.0.1, op: RELEASE_SHORT_CIRCUIT_FDS," +
" shmId: %016x%016x, slotIdx: %d, srvID: %s, success: %b", " shmId: %016x%016x, slotIdx: %d, srvID: %s, success: %b",
slotId.getShmId().getHi(), slotId.getShmId().getLo(), slotId.getShmId().getHi(), slotId.getShmId().getLo(),
@ -526,9 +525,9 @@ class DataXceiver extends Receiver implements Runnable {
sendShmSuccessResponse(sock, shmInfo); sendShmSuccessResponse(sock, shmInfo);
success = true; success = true;
} finally { } finally {
if (ClientTraceLog.isInfoEnabled()) { if (CLIENT_TRACE_LOG.isInfoEnabled()) {
if (success) { if (success) {
BlockSender.ClientTraceLog.info(String.format( BlockSender.CLIENT_TRACE_LOG.info(String.format(
"cliID: %s, src: 127.0.0.1, dest: 127.0.0.1, " + "cliID: %s, src: 127.0.0.1, dest: 127.0.0.1, " +
"op: REQUEST_SHORT_CIRCUIT_SHM," + "op: REQUEST_SHORT_CIRCUIT_SHM," +
" shmId: %016x%016x, srvID: %s, success: true", " shmId: %016x%016x, srvID: %s, success: true",
@ -536,7 +535,7 @@ class DataXceiver extends Receiver implements Runnable {
shmInfo.getShmId().getLo(), shmInfo.getShmId().getLo(),
datanode.getDatanodeUuid())); datanode.getDatanodeUuid()));
} else { } else {
BlockSender.ClientTraceLog.info(String.format( BlockSender.CLIENT_TRACE_LOG.info(String.format(
"cliID: %s, src: 127.0.0.1, dest: 127.0.0.1, " + "cliID: %s, src: 127.0.0.1, dest: 127.0.0.1, " +
"op: REQUEST_SHORT_CIRCUIT_SHM, " + "op: REQUEST_SHORT_CIRCUIT_SHM, " +
"shmId: n/a, srvID: %s, success: false", "shmId: n/a, srvID: %s, success: false",
@ -587,13 +586,10 @@ class DataXceiver extends Receiver implements Runnable {
BlockSender blockSender = null; BlockSender blockSender = null;
DatanodeRegistration dnR = DatanodeRegistration dnR =
datanode.getDNRegistrationForBP(block.getBlockPoolId()); datanode.getDNRegistrationForBP(block.getBlockPoolId());
final String clientTraceFmt = final String clientTraceFmt = clientName.length() > 0 && CLIENT_TRACE_LOG.isInfoEnabled() ?
clientName.length() > 0 && ClientTraceLog.isInfoEnabled() String.format(DN_CLIENTTRACE_FORMAT, localAddress, remoteAddress, "", "%d", "HDFS_READ",
? String.format(DN_CLIENTTRACE_FORMAT, localAddress, remoteAddress, clientName, "%d", dnR.getDatanodeUuid(), block, "%d") :
"", "%d", "HDFS_READ", clientName, "%d", dnR + " Served block " + block + " to " + remoteAddress;
dnR.getDatanodeUuid(), block, "%d")
: dnR + " Served block " + block + " to " +
remoteAddress;
try { try {
try { try {

View File

@ -185,9 +185,6 @@ import javax.management.NotCompliantMBeanException;
import javax.management.ObjectName; import javax.management.ObjectName;
import javax.management.StandardMBean; import javax.management.StandardMBean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
@ -405,7 +402,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
private final String contextFieldSeparator; private final String contextFieldSeparator;
boolean isAuditEnabled() { boolean isAuditEnabled() {
return (!isDefaultAuditLogger || auditLog.isInfoEnabled()) return (!isDefaultAuditLogger || AUDIT_LOG.isInfoEnabled())
&& !auditLoggers.isEmpty(); && !auditLoggers.isEmpty();
} }
@ -491,8 +488,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
* perm=&lt;permissions (optional)&gt; * perm=&lt;permissions (optional)&gt;
* </code> * </code>
*/ */
public static final Log auditLog = LogFactory.getLog( public static final Logger AUDIT_LOG = Logger.getLogger(FSNamesystem.class.getName() + ".audit");
FSNamesystem.class.getName() + ".audit");
private final int maxCorruptFileBlocksReturn; private final int maxCorruptFileBlocksReturn;
private final boolean isPermissionEnabled; private final boolean isPermissionEnabled;
@ -8783,8 +8779,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
FileStatus status, CallerContext callerContext, UserGroupInformation ugi, FileStatus status, CallerContext callerContext, UserGroupInformation ugi,
DelegationTokenSecretManager dtSecretManager) { DelegationTokenSecretManager dtSecretManager) {
if (auditLog.isDebugEnabled() || if (AUDIT_LOG.isDebugEnabled() ||
(auditLog.isInfoEnabled() && !debugCmdSet.contains(cmd))) { (AUDIT_LOG.isInfoEnabled() && !debugCmdSet.contains(cmd))) {
final StringBuilder sb = STRING_BUILDER.get(); final StringBuilder sb = STRING_BUILDER.get();
src = escapeJava(src); src = escapeJava(src);
dst = escapeJava(dst); dst = escapeJava(dst);
@ -8853,16 +8849,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
} }
public void logAuditMessage(String message) { public void logAuditMessage(String message) {
auditLog.info(message); AUDIT_LOG.info(message);
} }
} }
private static void enableAsyncAuditLog(Configuration conf) { private static void enableAsyncAuditLog(Configuration conf) {
if (!(auditLog instanceof Log4JLogger)) { Logger logger = AUDIT_LOG;
LOG.warn("Log4j is required to enable async auditlog");
return;
}
Logger logger = ((Log4JLogger)auditLog).getLogger();
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
List<Appender> appenders = Collections.list(logger.getAllAppenders()); List<Appender> appenders = Collections.list(logger.getAllAppenders());
// failsafe against trying to async it more than once // failsafe against trying to async it more than once
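
With the audit log pinned to log4j, enableAsyncAuditLog no longer needs the instanceof check and can manipulate appenders directly. The async-wrapping idiom it keeps, shown standalone for a generic log4j 1.x logger (a sketch of the idiom, not a verbatim copy of the method):

import java.util.Collections;
import java.util.List;
import org.apache.log4j.Appender;
import org.apache.log4j.AsyncAppender;
import org.apache.log4j.Logger;

public class AsyncLogSketch {
  static void makeAsync(Logger logger) {
    @SuppressWarnings("unchecked")
    List<Appender> appenders = Collections.list(logger.getAllAppenders());
    // Failsafe against wrapping the logger more than once.
    if (appenders.size() == 1 && appenders.get(0) instanceof AsyncAppender) {
      return;
    }
    AsyncAppender asyncAppender = new AsyncAppender();
    for (Appender appender : appenders) {
      asyncAppender.addAppender(appender);
      logger.removeAppender(appender);
    }
    logger.addAppender(asyncAppender);
  }
}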

View File

@ -17,9 +17,6 @@
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.namenode;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured; import org.apache.hadoop.conf.Configured;
@ -125,15 +122,10 @@ public class FsImageValidation {
} }
static void setLogLevel(Class<?> clazz, Level level) { static void setLogLevel(Class<?> clazz, Level level) {
final Log log = LogFactory.getLog(clazz); final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(clazz);
if (log instanceof Log4JLogger) {
final org.apache.log4j.Logger logger = ((Log4JLogger) log).getLogger();
logger.setLevel(level); logger.setLevel(level);
LOG.info("setLogLevel {} to {}, getEffectiveLevel() = {}", LOG.info("setLogLevel {} to {}, getEffectiveLevel() = {}", clazz.getName(), level,
clazz.getName(), level, logger.getEffectiveLevel()); logger.getEffectiveLevel());
} else {
LOG.warn("Failed setLogLevel {} to {}", clazz.getName(), level);
}
} }
static String toCommaSeparatedNumber(long n) { static String toCommaSeparatedNumber(long n) {

View File

@ -25,8 +25,6 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Preconditions;
import java.util.Set; import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
@ -427,8 +425,7 @@ public class NameNode extends ReconfigurableBase implements
private static final String NAMENODE_HTRACE_PREFIX = "namenode.htrace."; private static final String NAMENODE_HTRACE_PREFIX = "namenode.htrace.";
public static final Log MetricsLog = public static final String METRICS_LOG_NAME = "NameNodeMetricsLog";
LogFactory.getLog("NameNodeMetricsLog");
protected FSNamesystem namesystem; protected FSNamesystem namesystem;
protected final NamenodeRole role; protected final NamenodeRole role;
@ -949,13 +946,13 @@ public class NameNode extends ReconfigurableBase implements
return; return;
} }
MetricsLoggerTask.makeMetricsLoggerAsync(MetricsLog); MetricsLoggerTask.makeMetricsLoggerAsync(METRICS_LOG_NAME);
// Schedule the periodic logging. // Schedule the periodic logging.
metricsLoggerTimer = new ScheduledThreadPoolExecutor(1); metricsLoggerTimer = new ScheduledThreadPoolExecutor(1);
metricsLoggerTimer.setExecuteExistingDelayedTasksAfterShutdownPolicy( metricsLoggerTimer.setExecuteExistingDelayedTasksAfterShutdownPolicy(
false); false);
metricsLoggerTimer.scheduleWithFixedDelay(new MetricsLoggerTask(MetricsLog, metricsLoggerTimer.scheduleWithFixedDelay(new MetricsLoggerTask(METRICS_LOG_NAME,
"NameNode", (short) 128), "NameNode", (short) 128),
metricsLoggerPeriodSec, metricsLoggerPeriodSec,
metricsLoggerPeriodSec, metricsLoggerPeriodSec,

View File

@ -21,8 +21,6 @@ import java.net.URI;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collection; import java.util.Collection;
import java.util.EnumSet; import java.util.EnumSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
@ -41,6 +39,8 @@ import org.junit.Before;
import org.junit.Test; import org.junit.Test;
import org.junit.runner.RunWith; import org.junit.runner.RunWith;
import org.junit.runners.Parameterized; import org.junit.runners.Parameterized;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*; import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import static org.junit.Assert.*; import static org.junit.Assert.*;
@ -52,7 +52,7 @@ import static org.junit.Assert.*;
*/ */
@RunWith(Parameterized.class) @RunWith(Parameterized.class)
public class TestBlockTokenWrappingQOP extends SaslDataTransferTestCase { public class TestBlockTokenWrappingQOP extends SaslDataTransferTestCase {
public static final Log LOG = LogFactory.getLog(TestPermission.class); public static final Logger LOG = LoggerFactory.getLogger(TestPermission.class);
private HdfsConfiguration conf; private HdfsConfiguration conf;
private MiniDFSCluster cluster; private MiniDFSCluster cluster;
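
This is the one-line pattern repeated across most files in this commit: LogFactory.getLog becomes LoggerFactory.getLogger and the field type changes from Log to org.slf4j.Logger. (The migrated field keeps TestPermission.class as the logger class, exactly as the old LogFactory.getLog call did.) A sketch, with the log statement invented for illustration:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public static final Logger LOG = LoggerFactory.getLogger(TestPermission.class);

    // slf4j favors {} placeholders over string concatenation; arguments are
    // only formatted when the level is enabled.
    LOG.info("negotiated QOP for cluster {}", cluster); // message is illustrative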

View File

@ -190,7 +190,7 @@ public class TestDFSRename {
Path path = new Path("/test"); Path path = new Path("/test");
dfs.mkdirs(path); dfs.mkdirs(path);
GenericTestUtils.LogCapturer auditLog = GenericTestUtils.LogCapturer auditLog =
GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog); GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
dfs.rename(path, new Path("/dir1"), dfs.rename(path, new Path("/dir1"),
new Rename[] {Rename.OVERWRITE, Rename.TO_TRASH}); new Rename[] {Rename.OVERWRITE, Rename.TO_TRASH});
String auditOut = auditLog.getOutput(); String auditOut = auditLog.getOutput();
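
LogCapturer.captureLogs now takes the log4j AUDIT_LOG field directly instead of unwrapping a commons-logging Log. A usage sketch of the same pattern (the paths and the asserted audit substring are illustrative):

    GenericTestUtils.LogCapturer auditLog =
        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
    dfs.rename(new Path("/test"), new Path("/dir1"),
        new Rename[] {Rename.OVERWRITE, Rename.TO_TRASH});
    // Everything written to the audit logger during the capture is available:
    assertTrue(auditLog.getOutput().contains("cmd=rename"));
    auditLog.stopCapturing();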

View File

@ -17,8 +17,6 @@
*/ */
package org.apache.hadoop.hdfs; package org.apache.hadoop.hdfs;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
@ -29,6 +27,8 @@ import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.event.Level; import org.slf4j.event.Level;
import java.io.IOException; import java.io.IOException;
@ -47,7 +47,7 @@ import static org.junit.Assert.fail;
* Tests append on erasure coded file. * Tests append on erasure coded file.
*/ */
public class TestStripedFileAppend { public class TestStripedFileAppend {
public static final Log LOG = LogFactory.getLog(TestStripedFileAppend.class); public static final Logger LOG = LoggerFactory.getLogger(TestStripedFileAppend.class);
static { static {
DFSTestUtil.setNameNodeLogLevel(Level.TRACE); DFSTestUtil.setNameNodeLogLevel(Level.TRACE);

View File

@ -32,8 +32,6 @@ import java.util.Random;
import java.util.concurrent.TimeoutException; import java.util.concurrent.TimeoutException;
import java.util.regex.Pattern; import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.impl.Log4JLogger;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
@ -135,8 +133,7 @@ public class TestDataNodeMetricsLogger {
public void testMetricsLoggerIsAsync() throws IOException { public void testMetricsLoggerIsAsync() throws IOException {
startDNForTest(true); startDNForTest(true);
assertNotNull(dn); assertNotNull(dn);
org.apache.log4j.Logger logger = ((Log4JLogger) DataNode.METRICS_LOG) org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME);
.getLogger();
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
List<Appender> appenders = Collections.list(logger.getAllAppenders()); List<Appender> appenders = Collections.list(logger.getAllAppenders());
assertTrue(appenders.get(0) instanceof AsyncAppender); assertTrue(appenders.get(0) instanceof AsyncAppender);
@ -156,7 +153,7 @@ public class TestDataNodeMetricsLogger {
assertNotNull(dn); assertNotNull(dn);
final PatternMatchingAppender appender = new PatternMatchingAppender( final PatternMatchingAppender appender = new PatternMatchingAppender(
"^.*FakeMetric.*$"); "^.*FakeMetric.*$");
addAppender(DataNode.METRICS_LOG, appender); addAppender(org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME), appender);
// Ensure that the supplied pattern was matched. // Ensure that the supplied pattern was matched.
GenericTestUtils.waitFor(new Supplier<Boolean>() { GenericTestUtils.waitFor(new Supplier<Boolean>() {
@ -169,8 +166,7 @@ public class TestDataNodeMetricsLogger {
dn.shutdown(); dn.shutdown();
} }
private void addAppender(Log log, Appender appender) { private void addAppender(org.apache.log4j.Logger logger, Appender appender) {
org.apache.log4j.Logger logger = ((Log4JLogger) log).getLogger();
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
List<Appender> appenders = Collections.list(logger.getAllAppenders()); List<Appender> appenders = Collections.list(logger.getAllAppenders());
((AsyncAppender) appenders.get(0)).addAppender(appender); ((AsyncAppender) appenders.get(0)).addAppender(appender);
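
The waitFor call above is cut off by the hunk boundary; a sketch of how such a wait typically completes, assuming an isMatched accessor on PatternMatchingAppender (the method name is an assumption, not confirmed by this diff):

    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        // true once a log line matching "^.*FakeMetric.*$" has been appended
        return appender.isMatched(); // accessor name assumed
      }
    }, 1000, 60000);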

View File

@ -26,10 +26,11 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.FSNamesystemAuditLogger; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.FSNamesystemAuditLogger;
import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Rule; import org.junit.Rule;
import org.junit.Test; import org.junit.Test;
import org.junit.rules.Timeout; import org.junit.rules.Timeout;
import org.slf4j.event.Level;
import java.net.Inet4Address; import java.net.Inet4Address;
import java.util.Arrays; import java.util.Arrays;
@ -61,7 +62,7 @@ public class TestAuditLogAtDebug {
Joiner.on(",").join(debugCommands.get())); Joiner.on(",").join(debugCommands.get()));
} }
logger.initialize(conf); logger.initialize(conf);
GenericTestUtils.setLogLevel(FSNamesystem.auditLog, level); GenericTestUtils.setLogLevel(FSNamesystem.AUDIT_LOG, level);
return spy(logger); return spy(logger);
} }

View File

@ -258,7 +258,7 @@ public class TestAuditLogger {
conf.setInt(HADOOP_CALLER_CONTEXT_SIGNATURE_MAX_SIZE_KEY, 40); conf.setInt(HADOOP_CALLER_CONTEXT_SIGNATURE_MAX_SIZE_KEY, 40);
try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) { try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) {
LogCapturer auditlog = LogCapturer.captureLogs(FSNamesystem.auditLog); LogCapturer auditlog = LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
cluster.waitClusterUp(); cluster.waitClusterUp();
final FileSystem fs = cluster.getFileSystem(); final FileSystem fs = cluster.getFileSystem();
final long time = System.currentTimeMillis(); final long time = System.currentTimeMillis();
@ -568,7 +568,7 @@ public class TestAuditLogger {
Configuration conf = new HdfsConfiguration(); Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster1 = new MiniDFSCluster.Builder(conf).build(); MiniDFSCluster cluster1 = new MiniDFSCluster.Builder(conf).build();
try { try {
LogCapturer auditLog = LogCapturer.captureLogs(FSNamesystem.auditLog); LogCapturer auditLog = LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
cluster1.waitClusterUp(); cluster1.waitClusterUp();
FileSystem fs = cluster1.getFileSystem(); FileSystem fs = cluster1.getFileSystem();
long time = System.currentTimeMillis(); long time = System.currentTimeMillis();
@ -585,7 +585,7 @@ public class TestAuditLogger {
conf.setBoolean(HADOOP_CALLER_CONTEXT_ENABLED_KEY, true); conf.setBoolean(HADOOP_CALLER_CONTEXT_ENABLED_KEY, true);
MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf).build(); MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf).build();
try { try {
LogCapturer auditLog = LogCapturer.captureLogs(FSNamesystem.auditLog); LogCapturer auditLog = LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
cluster2.waitClusterUp(); cluster2.waitClusterUp();
FileSystem fs = cluster2.getFileSystem(); FileSystem fs = cluster2.getFileSystem();
long time = System.currentTimeMillis(); long time = System.currentTimeMillis();
@ -606,7 +606,7 @@ public class TestAuditLogger {
conf.setInt(HADOOP_CALLER_CONTEXT_SIGNATURE_MAX_SIZE_KEY, 40); conf.setInt(HADOOP_CALLER_CONTEXT_SIGNATURE_MAX_SIZE_KEY, 40);
try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) { try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) {
LogCapturer auditlog = LogCapturer.captureLogs(FSNamesystem.auditLog); LogCapturer auditlog = LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
cluster.waitClusterUp(); cluster.waitClusterUp();
final FileSystem fs = cluster.getFileSystem(); final FileSystem fs = cluster.getFileSystem();
final long time = System.currentTimeMillis(); final long time = System.currentTimeMillis();

View File

@ -93,7 +93,7 @@ public class TestAuditLoggerWithCommands {
user2 = user2 =
UserGroupInformation.createUserForTesting("theEngineer", UserGroupInformation.createUserForTesting("theEngineer",
new String[]{"hadoop"}); new String[]{"hadoop"});
auditlog = LogCapturer.captureLogs(FSNamesystem.auditLog); auditlog = LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
proto = cluster.getNameNodeRpc(); proto = cluster.getNameNodeRpc();
fileSys = DFSTestUtil.getFileSystemAs(user1, conf); fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
fs2 = DFSTestUtil.getFileSystemAs(user2, conf); fs2 = DFSTestUtil.getFileSystemAs(user2, conf);

View File

@ -32,7 +32,6 @@ import java.util.Enumeration;
import java.util.List; import java.util.List;
import java.util.regex.Pattern; import java.util.regex.Pattern;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
@ -130,7 +129,7 @@ public class TestAuditLogs {
util.createFiles(fs, fileName); util.createFiles(fs, fileName);
// make sure the appender is what it's supposed to be // make sure the appender is what it's supposed to be
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger(); Logger logger = FSNamesystem.AUDIT_LOG;
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
List<Appender> appenders = Collections.list(logger.getAllAppenders()); List<Appender> appenders = Collections.list(logger.getAllAppenders());
assertEquals(1, appenders.size()); assertEquals(1, appenders.size());
@ -283,7 +282,7 @@ public class TestAuditLogs {
/** Sets up log4j logger for auditlogs */ /** Sets up log4j logger for auditlogs */
private void setupAuditLogs() throws IOException { private void setupAuditLogs() throws IOException {
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger(); Logger logger = FSNamesystem.AUDIT_LOG;
// enable logging now that the test is ready to run // enable logging now that the test is ready to run
logger.setLevel(Level.INFO); logger.setLevel(Level.INFO);
} }
@ -303,7 +302,7 @@ public class TestAuditLogs {
disableAuditLog(); disableAuditLog();
PatternLayout layout = new PatternLayout("%m%n"); PatternLayout layout = new PatternLayout("%m%n");
RollingFileAppender appender = new RollingFileAppender(layout, auditLogFile); RollingFileAppender appender = new RollingFileAppender(layout, auditLogFile);
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger(); Logger logger = FSNamesystem.AUDIT_LOG;
logger.addAppender(appender); logger.addAppender(appender);
} }
@ -319,7 +318,7 @@ public class TestAuditLogs {
disableAuditLog(); disableAuditLog();
// Close the appenders and force all logs to be flushed // Close the appenders and force all logs to be flushed
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger(); Logger logger = FSNamesystem.AUDIT_LOG;
Enumeration<?> appenders = logger.getAllAppenders(); Enumeration<?> appenders = logger.getAllAppenders();
while (appenders.hasMoreElements()) { while (appenders.hasMoreElements()) {
Appender appender = (Appender)appenders.nextElement(); Appender appender = (Appender)appenders.nextElement();
@ -352,7 +351,7 @@ public class TestAuditLogs {
disableAuditLog(); disableAuditLog();
// Close the appenders and force all logs to be flushed // Close the appenders and force all logs to be flushed
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger(); Logger logger = FSNamesystem.AUDIT_LOG;
Enumeration<?> appenders = logger.getAllAppenders(); Enumeration<?> appenders = logger.getAllAppenders();
while (appenders.hasMoreElements()) { while (appenders.hasMoreElements()) {
Appender appender = (Appender)appenders.nextElement(); Appender appender = (Appender)appenders.nextElement();
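
Both truncated loops above walk the appenders of FSNamesystem.AUDIT_LOG; a sketch of the complete flush-and-close idiom (the close call is the assumed loop body):

    Logger logger = FSNamesystem.AUDIT_LOG;
    Enumeration<?> appenders = logger.getAllAppenders();
    while (appenders.hasMoreElements()) {
      Appender appender = (Appender) appenders.nextElement();
      appender.close(); // closing a log4j appender flushes any buffered events
    }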

View File

@ -61,7 +61,6 @@ import java.util.regex.Matcher;
import java.util.regex.Pattern; import java.util.regex.Pattern;
import java.util.function.Supplier; import java.util.function.Supplier;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
@ -252,7 +251,7 @@ public class TestFsck {
if (file.exists()) { if (file.exists()) {
file.delete(); file.delete();
} }
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger(); Logger logger = FSNamesystem.AUDIT_LOG;
logger.removeAllAppenders(); logger.removeAllAppenders();
logger.setLevel(Level.INFO); logger.setLevel(Level.INFO);
PatternLayout layout = new PatternLayout("%m%n"); PatternLayout layout = new PatternLayout("%m%n");
@ -291,7 +290,7 @@ public class TestFsck {
if (reader != null) { if (reader != null) {
reader.close(); reader.close();
} }
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger(); Logger logger = FSNamesystem.AUDIT_LOG;
if (logger != null) { if (logger != null) {
logger.removeAllAppenders(); logger.removeAllAppenders();
} }

View File

@ -19,8 +19,6 @@
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.namenode;
import java.util.function.Supplier; import java.util.function.Supplier;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.impl.Log4JLogger;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
@ -70,8 +68,7 @@ public class TestNameNodeMetricsLogger {
@Test @Test
public void testMetricsLoggerIsAsync() throws IOException { public void testMetricsLoggerIsAsync() throws IOException {
makeNameNode(true); makeNameNode(true);
org.apache.log4j.Logger logger = org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(NameNode.METRICS_LOG_NAME);
((Log4JLogger) NameNode.MetricsLog).getLogger();
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
List<Appender> appenders = Collections.list(logger.getAllAppenders()); List<Appender> appenders = Collections.list(logger.getAllAppenders());
assertTrue(appenders.get(0) instanceof AsyncAppender); assertTrue(appenders.get(0) instanceof AsyncAppender);
@ -90,7 +87,7 @@ public class TestNameNodeMetricsLogger {
makeNameNode(true); // Log metrics early and often. makeNameNode(true); // Log metrics early and often.
final PatternMatchingAppender appender = final PatternMatchingAppender appender =
new PatternMatchingAppender("^.*FakeMetric42.*$"); new PatternMatchingAppender("^.*FakeMetric42.*$");
addAppender(NameNode.MetricsLog, appender); addAppender(org.apache.log4j.Logger.getLogger(NameNode.METRICS_LOG_NAME), appender);
// Ensure that the supplied pattern was matched. // Ensure that the supplied pattern was matched.
GenericTestUtils.waitFor(new Supplier<Boolean>() { GenericTestUtils.waitFor(new Supplier<Boolean>() {
@ -118,8 +115,7 @@ public class TestNameNodeMetricsLogger {
return new TestNameNode(conf); return new TestNameNode(conf);
} }
private void addAppender(Log log, Appender appender) { private void addAppender(org.apache.log4j.Logger logger, Appender appender) {
org.apache.log4j.Logger logger = ((Log4JLogger) log).getLogger();
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
List<Appender> appenders = Collections.list(logger.getAllAppenders()); List<Appender> appenders = Collections.list(logger.getAllAppenders());
((AsyncAppender) appenders.get(0)).addAppender(appender); ((AsyncAppender) appenders.get(0)).addAppender(appender);

View File

@ -45,7 +45,7 @@ import java.util.function.Supplier;
*/ */
public class TestDNFencingWithReplication { public class TestDNFencingWithReplication {
static { static {
GenericTestUtils.setLogLevel(FSNamesystem.auditLog, Level.WARN); GenericTestUtils.setLogLevel(FSNamesystem.AUDIT_LOG, org.apache.log4j.Level.WARN);
GenericTestUtils.setLogLevel(Server.LOG, Level.ERROR); GenericTestUtils.setLogLevel(Server.LOG, Level.ERROR);
GenericTestUtils.setLogLevel(RetryInvocationHandler.LOG, Level.ERROR); GenericTestUtils.setLogLevel(RetryInvocationHandler.LOG, Level.ERROR);
} }
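
Two setLogLevel overloads now coexist in this static block: AUDIT_LOG remains a log4j logger and takes an org.apache.log4j.Level, while Server.LOG and RetryInvocationHandler.LOG are slf4j loggers and take org.slf4j.event.Level. In sketch form:

    // log4j logger, log4j level:
    GenericTestUtils.setLogLevel(FSNamesystem.AUDIT_LOG, org.apache.log4j.Level.WARN);
    // slf4j logger, slf4j event level:
    GenericTestUtils.setLogLevel(Server.LOG, org.slf4j.event.Level.ERROR);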

View File

@ -18,18 +18,18 @@
package org.apache.hadoop.mapreduce.v2.app.speculate.forecast; package org.apache.hadoop.mapreduce.v2.app.speculate.forecast;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.util.ControlledClock;
import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.util.ControlledClock;
/** /**
* Testing the statistical model of simple exponential estimator. * Testing the statistical model of simple exponential estimator.
*/ */
public class TestSimpleExponentialForecast { public class TestSimpleExponentialForecast {
private static final Log LOG = private static final Logger LOG = LoggerFactory.getLogger(TestSimpleExponentialForecast.class);
LogFactory.getLog(TestSimpleExponentialForecast.class);
private static long clockTicks = 1000L; private static long clockTicks = 1000L;
private ControlledClock clock; private ControlledClock clock;

View File

@ -28,8 +28,6 @@ import java.util.Collection;
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
@ -66,6 +64,8 @@ import org.junit.Ignore;
import org.junit.Test; import org.junit.Test;
import org.junit.runner.RunWith; import org.junit.runner.RunWith;
import org.junit.runners.Parameterized; import org.junit.runners.Parameterized;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** /**
* Test speculation on Mini Cluster. * Test speculation on Mini Cluster.
@ -73,8 +73,7 @@ import org.junit.runners.Parameterized;
@Ignore @Ignore
@RunWith(Parameterized.class) @RunWith(Parameterized.class)
public class TestSpeculativeExecOnCluster { public class TestSpeculativeExecOnCluster {
private static final Log LOG = LogFactory private static final Logger LOG = LoggerFactory.getLogger(TestSpeculativeExecOnCluster.class);
.getLog(TestSpeculativeExecOnCluster.class);
private static final int NODE_MANAGERS_COUNT = 2; private static final int NODE_MANAGERS_COUNT = 2;
private static final boolean ENABLE_SPECULATIVE_MAP = true; private static final boolean ENABLE_SPECULATIVE_MAP = true;

View File

@ -132,11 +132,6 @@
<groupId>io.netty</groupId> <groupId>io.netty</groupId>
<artifactId>netty-all</artifactId> <artifactId>netty-all</artifactId>
</dependency> </dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<scope>provided</scope>
</dependency>
<dependency> <dependency>
<groupId>org.apache.hadoop.thirdparty</groupId> <groupId>org.apache.hadoop.thirdparty</groupId>
<artifactId>hadoop-shaded-guava</artifactId> <artifactId>hadoop-shaded-guava</artifactId>

View File

@ -38,10 +38,6 @@
<groupId>commons-cli</groupId> <groupId>commons-cli</groupId>
<artifactId>commons-cli</artifactId> <artifactId>commons-cli</artifactId>
</dependency> </dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</dependency>
<dependency> <dependency>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-jobclient</artifactId> <artifactId>hadoop-mapreduce-client-jobclient</artifactId>

View File

@ -121,7 +121,6 @@
<commons-io.version>2.11.0</commons-io.version> <commons-io.version>2.11.0</commons-io.version>
<commons-lang3.version>3.12.0</commons-lang3.version> <commons-lang3.version>3.12.0</commons-lang3.version>
<commons-logging.version>1.1.3</commons-logging.version> <commons-logging.version>1.1.3</commons-logging.version>
<commons-logging-api.version>1.1</commons-logging-api.version>
<commons-math3.version>3.6.1</commons-math3.version> <commons-math3.version>3.6.1</commons-math3.version>
<commons-net.version>3.9.0</commons-net.version> <commons-net.version>3.9.0</commons-net.version>
<commons-text.version>1.10.0</commons-text.version> <commons-text.version>1.10.0</commons-text.version>
@ -1094,11 +1093,6 @@
</exclusion> </exclusion>
</exclusions> </exclusions>
</dependency> </dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging-api</artifactId>
<version>${commons-logging-api.version}</version>
</dependency>
<dependency> <dependency>
<groupId>log4j</groupId> <groupId>log4j</groupId>
<artifactId>log4j</artifactId> <artifactId>log4j</artifactId>

View File

@ -101,11 +101,6 @@
<artifactId>commons-io</artifactId> <artifactId>commons-io</artifactId>
<scope>provided</scope> <scope>provided</scope>
</dependency> </dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<scope>provided</scope>
</dependency>
<dependency> <dependency>
<groupId>commons-cli</groupId> <groupId>commons-cli</groupId>
<artifactId>commons-cli</artifactId> <artifactId>commons-cli</artifactId>

View File

@ -30,8 +30,6 @@ import java.io.IOException;
import java.io.InputStream; import java.io.InputStream;
import java.util.ArrayList; import java.util.ArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSExceptionMessages; import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.azure.StorageInterface.CloudPageBlobWrapper; import org.apache.hadoop.fs.azure.StorageInterface.CloudPageBlobWrapper;
@ -39,6 +37,8 @@ import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.StorageException; import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.BlobRequestOptions; import com.microsoft.azure.storage.blob.BlobRequestOptions;
import com.microsoft.azure.storage.blob.PageRange; import com.microsoft.azure.storage.blob.PageRange;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** /**
* An input stream that reads file data from a page blob stored * An input stream that reads file data from a page blob stored
@ -46,7 +46,7 @@ import com.microsoft.azure.storage.blob.PageRange;
*/ */
final class PageBlobInputStream extends InputStream { final class PageBlobInputStream extends InputStream {
private static final Log LOG = LogFactory.getLog(PageBlobInputStream.class); private static final Logger LOG = LoggerFactory.getLogger(PageBlobInputStream.class);
// The blob we're reading from. // The blob we're reading from.
private final CloudPageBlobWrapper blob; private final CloudPageBlobWrapper blob;

View File

@ -39,8 +39,6 @@ import org.apache.hadoop.fs.StreamCapabilities;
import org.apache.hadoop.fs.Syncable; import org.apache.hadoop.fs.Syncable;
import org.apache.hadoop.fs.azure.StorageInterface.CloudPageBlobWrapper; import org.apache.hadoop.fs.azure.StorageInterface.CloudPageBlobWrapper;
import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.classification.VisibleForTesting;
@ -48,7 +46,8 @@ import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.StorageException; import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.BlobRequestOptions; import com.microsoft.azure.storage.blob.BlobRequestOptions;
import com.microsoft.azure.storage.blob.CloudPageBlob; import com.microsoft.azure.storage.blob.CloudPageBlob;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** /**
* An output stream that write file data to a page blob stored using ASV's * An output stream that write file data to a page blob stored using ASV's
@ -120,7 +119,7 @@ final class PageBlobOutputStream extends OutputStream implements Syncable, Strea
// Whether the stream has been closed. // Whether the stream has been closed.
private boolean closed = false; private boolean closed = false;
public static final Log LOG = LogFactory.getLog(AzureNativeFileSystemStore.class); public static final Logger LOG = LoggerFactory.getLogger(AzureNativeFileSystemStore.class);
// Set the minimum page blob file size to 128MB, which is >> the default // Set the minimum page blob file size to 128MB, which is >> the default
// block size of 32MB. This default block size is often used as the // block size of 32MB. This default block size is often used as the

View File

@ -18,8 +18,6 @@
package org.apache.hadoop.fs.azure; package org.apache.hadoop.fs.azure;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper; import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper;
import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.classification.VisibleForTesting;
@ -27,6 +25,8 @@ import org.apache.hadoop.classification.VisibleForTesting;
import com.microsoft.azure.storage.AccessCondition; import com.microsoft.azure.storage.AccessCondition;
import com.microsoft.azure.storage.StorageException; import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.CloudBlob; import com.microsoft.azure.storage.blob.CloudBlob;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicInteger;
@ -58,7 +58,7 @@ public class SelfRenewingLease {
// Time to wait to renew lease in milliseconds // Time to wait to renew lease in milliseconds
public static final int LEASE_RENEWAL_PERIOD = 40000; public static final int LEASE_RENEWAL_PERIOD = 40000;
private static final Log LOG = LogFactory.getLog(SelfRenewingLease.class); private static final Logger LOG = LoggerFactory.getLogger(SelfRenewingLease.class);
// Used to allocate thread serial numbers in thread name // Used to allocate thread serial numbers in thread name
private static AtomicInteger threadNumber = new AtomicInteger(0); private static AtomicInteger threadNumber = new AtomicInteger(0);

View File

@ -21,8 +21,6 @@ package org.apache.hadoop.fs.azure;
import java.net.HttpURLConnection; import java.net.HttpURLConnection;
import java.util.Date; import java.util.Date;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import com.microsoft.azure.storage.OperationContext; import com.microsoft.azure.storage.OperationContext;
@ -30,6 +28,8 @@ import com.microsoft.azure.storage.RequestResult;
import com.microsoft.azure.storage.ResponseReceivedEvent; import com.microsoft.azure.storage.ResponseReceivedEvent;
import com.microsoft.azure.storage.SendingRequestEvent; import com.microsoft.azure.storage.SendingRequestEvent;
import com.microsoft.azure.storage.StorageEvent; import com.microsoft.azure.storage.StorageEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/* /*
* Self throttling is implemented by hooking into send & response callbacks * Self throttling is implemented by hooking into send & response callbacks
@ -63,8 +63,7 @@ import com.microsoft.azure.storage.StorageEvent;
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class SelfThrottlingIntercept { public class SelfThrottlingIntercept {
public static final Log LOG = LogFactory public static final Logger LOG = LoggerFactory.getLogger(SelfThrottlingIntercept.class);
.getLog(SelfThrottlingIntercept.class);
private final float readFactor; private final float readFactor;
private final float writeFactor; private final float writeFactor;

View File

@ -21,8 +21,6 @@ package org.apache.hadoop.fs.azure;
import java.net.HttpURLConnection; import java.net.HttpURLConnection;
import java.security.InvalidKeyException; import java.security.InvalidKeyException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import com.microsoft.azure.storage.Constants.HeaderConstants; import com.microsoft.azure.storage.Constants.HeaderConstants;
@ -40,8 +38,6 @@ import com.microsoft.azure.storage.StorageException;
@InterfaceAudience.Private @InterfaceAudience.Private
public final class SendRequestIntercept extends StorageEvent<SendingRequestEvent> { public final class SendRequestIntercept extends StorageEvent<SendingRequestEvent> {
public static final Log LOG = LogFactory.getLog(SendRequestIntercept.class);
private static final String ALLOW_ALL_REQUEST_PRECONDITIONS = "*"; private static final String ALLOW_ALL_REQUEST_PRECONDITIONS = "*";
/** /**

View File

@ -20,8 +20,9 @@ package org.apache.hadoop.fs.azure;
import java.io.IOException; import java.io.IOException;
import org.apache.commons.logging.Log; import org.slf4j.Logger;
import org.apache.commons.logging.LogFactory; import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.ProviderUtils; import org.apache.hadoop.security.ProviderUtils;
@ -32,7 +33,7 @@ import org.apache.hadoop.security.ProviderUtils;
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class SimpleKeyProvider implements KeyProvider { public class SimpleKeyProvider implements KeyProvider {
private static final Log LOG = LogFactory.getLog(SimpleKeyProvider.class); private static final Logger LOG = LoggerFactory.getLogger(SimpleKeyProvider.class);
protected static final String KEY_ACCOUNT_KEY_PREFIX = protected static final String KEY_ACCOUNT_KEY_PREFIX =
"fs.azure.account.key."; "fs.azure.account.key.";

View File

@ -21,8 +21,6 @@ package org.apache.hadoop.fs.azure.metrics;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Date; import java.util.Date;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
/** /**
@ -31,8 +29,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public final class BandwidthGaugeUpdater { public final class BandwidthGaugeUpdater {
public static final Log LOG = LogFactory
.getLog(BandwidthGaugeUpdater.class);
public static final String THREAD_NAME = "AzureNativeFilesystemStore-UploadBandwidthUpdater"; public static final String THREAD_NAME = "AzureNativeFilesystemStore-UploadBandwidthUpdater";

View File

@ -20,8 +20,6 @@ package org.apache.hadoop.fs.azure.metrics;
import java.net.HttpURLConnection; import java.net.HttpURLConnection;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import com.microsoft.azure.storage.Constants.HeaderConstants; import com.microsoft.azure.storage.Constants.HeaderConstants;
@ -38,8 +36,6 @@ import com.microsoft.azure.storage.StorageEvent;
@InterfaceAudience.Private @InterfaceAudience.Private
public final class ResponseReceivedMetricUpdater extends StorageEvent<ResponseReceivedEvent> { public final class ResponseReceivedMetricUpdater extends StorageEvent<ResponseReceivedEvent> {
public static final Log LOG = LogFactory.getLog(ResponseReceivedMetricUpdater.class);
private final AzureFileSystemInstrumentation instrumentation; private final AzureFileSystemInstrumentation instrumentation;
private final BandwidthGaugeUpdater blockUploadGaugeUpdater; private final BandwidthGaugeUpdater blockUploadGaugeUpdater;

View File

@ -28,7 +28,6 @@ import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
@ -41,6 +40,8 @@ import org.junit.rules.ExpectedException;
import org.mockito.Mockito; import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock; import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer; import org.mockito.stubbing.Answer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** /**
* Tests the Native Azure file system (WASB) using parallel threads for rename and delete operations. * Tests the Native Azure file system (WASB) using parallel threads for rename and delete operations.
@ -70,8 +71,7 @@ public class ITestFileSystemOperationsWithThreads extends AbstractWasbTestBase {
fs.initialize(uri, conf); fs.initialize(uri, conf);
// Capture logs // Capture logs
logs = LogCapturer.captureLogs(new Log4JLogger(org.apache.log4j.Logger logs = LogCapturer.captureLogs(LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME));
.getRootLogger()));
} }
/* /*
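
Instead of wrapping org.apache.log4j.Logger.getRootLogger() in a commons-logging Log4JLogger, the capture now goes through slf4j's well-known root logger name. A sketch of the pattern:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    LogCapturer logs =
        LogCapturer.captureLogs(LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME));
    // ... run the rename/delete operations under test ...
    String output = logs.getOutput();
    logs.stopCapturing();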

View File

@ -21,12 +21,13 @@ package org.apache.hadoop.fs.azure;
import java.net.URI; import java.net.URI;
import java.util.StringTokenizer; import java.util.StringTokenizer;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer; import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.log4j.Logger;
import org.junit.Test; import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** /**
 * Test to validate Azure storage client side logging. Test works only when  * Test to validate Azure storage client side logging. Test works only when
@ -94,8 +95,8 @@ public class ITestNativeAzureFileSystemClientLogging
@Test @Test
public void testLoggingEnabled() throws Exception { public void testLoggingEnabled() throws Exception {
LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger LogCapturer logs =
.getRootLogger())); LogCapturer.captureLogs(LoggerFactory.getLogger(org.slf4j.Logger.ROOT_LOGGER_NAME));
// Update configuration based on the Test. // Update configuration based on the Test.
updateFileSystemConfiguration(true); updateFileSystemConfiguration(true);
@ -116,8 +117,7 @@ public class ITestNativeAzureFileSystemClientLogging
@Test @Test
public void testLoggingDisabled() throws Exception { public void testLoggingDisabled() throws Exception {
LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger LogCapturer logs = LogCapturer.captureLogs(LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME));
.getRootLogger()));
// Update configuration based on the Test. // Update configuration based on the Test.
updateFileSystemConfiguration(false); updateFileSystemConfiguration(false);

View File

@ -30,8 +30,6 @@ import java.util.Date;
import java.util.EnumSet; import java.util.EnumSet;
import java.util.TimeZone; import java.util.TimeZone;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
@ -49,6 +47,8 @@ import org.apache.hadoop.fs.azure.NativeAzureFileSystem.FolderRenamePending;
import com.microsoft.azure.storage.AccessCondition; import com.microsoft.azure.storage.AccessCondition;
import com.microsoft.azure.storage.StorageException; import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.CloudBlob; import com.microsoft.azure.storage.blob.CloudBlob;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.readStringFromFile; import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.readStringFromFile;
import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.writeStringToFile; import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.writeStringToFile;
@ -73,7 +73,7 @@ public abstract class NativeAzureFileSystemBaseTest
private static final EnumSet<XAttrSetFlag> CREATE_FLAG = EnumSet.of(XAttrSetFlag.CREATE); private static final EnumSet<XAttrSetFlag> CREATE_FLAG = EnumSet.of(XAttrSetFlag.CREATE);
private static final EnumSet<XAttrSetFlag> REPLACE_FLAG = EnumSet.of(XAttrSetFlag.REPLACE); private static final EnumSet<XAttrSetFlag> REPLACE_FLAG = EnumSet.of(XAttrSetFlag.REPLACE);
public static final Log LOG = LogFactory.getLog(NativeAzureFileSystemBaseTest.class); public static final Logger LOG = LoggerFactory.getLogger(NativeAzureFileSystemBaseTest.class);
protected NativeAzureFileSystem fs; protected NativeAzureFileSystem fs;
@Override @Override

View File

@ -23,10 +23,10 @@ import java.nio.charset.Charset;
import org.junit.Assert; import org.junit.Assert;
import org.junit.Test; import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.io.FileUtils; import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys; import org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys;
import org.apache.hadoop.fs.azurebfs.contracts.exceptions.KeyProviderException; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.KeyProviderException;
@ -39,8 +39,7 @@ import static org.junit.Assert.assertEquals;
* *
*/ */
public class TestShellDecryptionKeyProvider { public class TestShellDecryptionKeyProvider {
public static final Log LOG = LogFactory public static final Logger LOG = LoggerFactory.getLogger(TestShellDecryptionKeyProvider.class);
.getLog(TestShellDecryptionKeyProvider.class);
private static final File TEST_ROOT_DIR = new File(System.getProperty( private static final File TEST_ROOT_DIR = new File(System.getProperty(
"test.build.data", "/tmp"), "TestShellDecryptionKeyProvider"); "test.build.data", "/tmp"), "TestShellDecryptionKeyProvider");

View File

@ -34,8 +34,6 @@ import org.apache.hadoop.yarn.appcatalog.model.Application;
import org.apache.hadoop.yarn.appcatalog.utils.RandomWord; import org.apache.hadoop.yarn.appcatalog.utils.RandomWord;
import org.apache.hadoop.yarn.appcatalog.utils.WordLengthException; import org.apache.hadoop.yarn.appcatalog.utils.WordLengthException;
import org.apache.hadoop.yarn.service.api.records.Service; import org.apache.hadoop.yarn.service.api.records.Service;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.solr.client.solrj.SolrClient; import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery; import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrQuery.ORDER; import org.apache.solr.client.solrj.SolrQuery.ORDER;
@ -48,13 +46,15 @@ import org.apache.solr.common.SolrInputDocument;
import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** /**
* Driver class for accessing Solr. * Driver class for accessing Solr.
*/ */
public class AppCatalogSolrClient { public class AppCatalogSolrClient {
private static final Log LOG = LogFactory.getLog(AppCatalogSolrClient.class); private static final Logger LOG = LoggerFactory.getLogger(AppCatalogSolrClient.class);
private static String urlString; private static String urlString;
public AppCatalogSolrClient() { public AppCatalogSolrClient() {

View File

@ -21,8 +21,6 @@ package org.apache.hadoop.yarn.appcatalog.application;
import java.io.IOException; import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.appcatalog.model.AppEntry; import org.apache.hadoop.yarn.appcatalog.model.AppEntry;
@ -39,13 +37,15 @@ import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.UniformInterfaceException; import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.config.ClientConfig; import com.sun.jersey.api.client.config.ClientConfig;
import com.sun.jersey.api.client.config.DefaultClientConfig; import com.sun.jersey.api.client.config.DefaultClientConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** /**
* Driver class for calling YARN Resource Manager REST API. * Driver class for calling YARN Resource Manager REST API.
*/ */
public class YarnServiceClient { public class YarnServiceClient {
private static final Log LOG = LogFactory.getLog(YarnServiceClient.class); private static final Logger LOG = LoggerFactory.getLogger(YarnServiceClient.class);
private static Configuration conf = new Configuration(); private static Configuration conf = new Configuration();
private static ClientConfig getClientConfig() { private static ClientConfig getClientConfig() {
ClientConfig config = new DefaultClientConfig(); ClientConfig config = new DefaultClientConfig();

View File

@ -21,8 +21,6 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime;
import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
@ -81,6 +79,8 @@ import java.util.regex.Matcher;
import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_NM_RUNC_IMAGE_TAG_TO_MANIFEST_PLUGIN; import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_NM_RUNC_IMAGE_TAG_TO_MANIFEST_PLUGIN;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_NM_RUNC_LAYER_MOUNTS_TO_KEEP; import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_NM_RUNC_LAYER_MOUNTS_TO_KEEP;
@ -136,8 +136,7 @@ import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
@InterfaceStability.Unstable @InterfaceStability.Unstable
public class RuncContainerRuntime extends OCIContainerRuntime { public class RuncContainerRuntime extends OCIContainerRuntime {
private static final Log LOG = LogFactory.getLog( private static final Logger LOG = LoggerFactory.getLogger(RuncContainerRuntime.class);
RuncContainerRuntime.class);
@InterfaceAudience.Private @InterfaceAudience.Private
private static final String RUNTIME_TYPE = "RUNC"; private static final String RUNTIME_TYPE = "RUNC";

View File

@ -20,8 +20,6 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.runc; package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.runc;
import org.apache.commons.io.IOUtils; import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataInputStream;
@ -45,6 +43,8 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.atomic.AtomicReference;
import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_NM_RUNC_CACHE_REFRESH_INTERVAL; import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_NM_RUNC_CACHE_REFRESH_INTERVAL;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_NM_RUNC_IMAGE_TOPLEVEL_DIR; import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_NM_RUNC_IMAGE_TOPLEVEL_DIR;
@ -78,8 +78,7 @@ public class ImageTagToManifestPlugin extends AbstractService
private String manifestDir; private String manifestDir;
private String localImageTagToHashFile; private String localImageTagToHashFile;
private static final Log LOG = LogFactory.getLog( private static final Logger LOG = LoggerFactory.getLogger(ImageTagToManifestPlugin.class);
ImageTagToManifestPlugin.class);
private static final int SHA256_HASH_LENGTH = 64; private static final int SHA256_HASH_LENGTH = 64;
private static final String ALPHA_NUMERIC = "[a-zA-Z0-9]+"; private static final String ALPHA_NUMERIC = "[a-zA-Z0-9]+";

View File

@ -17,8 +17,6 @@
*/ */
package org.apache.hadoop.yarn.server.resourcemanager; package org.apache.hadoop.yarn.server.resourcemanager;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
@ -27,11 +25,14 @@ import javax.management.NotCompliantMBeanException;
import javax.management.ObjectName; import javax.management.ObjectName;
import javax.management.StandardMBean; import javax.management.StandardMBean;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** /**
* JMX bean for RM info. * JMX bean for RM info.
*/ */
public class RMInfo implements RMInfoMXBean { public class RMInfo implements RMInfoMXBean {
private static final Log LOG = LogFactory.getLog(RMNMInfo.class); private static final Logger LOG = LoggerFactory.getLogger(RMNMInfo.class);
private ResourceManager resourceManager; private ResourceManager resourceManager;
private ObjectName rmStatusBeanName; private ObjectName rmStatusBeanName;

View File

@ -22,6 +22,8 @@ import com.google.inject.Inject;
import com.google.inject.Singleton; import com.google.inject.Singleton;
import com.sun.jersey.api.json.JSONConfiguration; import com.sun.jersey.api.json.JSONConfiguration;
import com.sun.jersey.api.json.JSONJAXBContext; import com.sun.jersey.api.json.JSONJAXBContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*; import java.util.*;
@ -29,8 +31,6 @@ import javax.ws.rs.ext.ContextResolver;
import javax.ws.rs.ext.Provider; import javax.ws.rs.ext.Provider;
import javax.xml.bind.JAXBContext; import javax.xml.bind.JAXBContext;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.UserInfo; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.UserInfo;
@ -41,8 +41,7 @@ import org.apache.hadoop.yarn.webapp.RemoteExceptionData;
@Provider @Provider
public class JAXBContextResolver implements ContextResolver<JAXBContext> { public class JAXBContextResolver implements ContextResolver<JAXBContext> {
private static final Log LOG = private static final Logger LOG = LoggerFactory.getLogger(JAXBContextResolver.class.getName());
LogFactory.getLog(JAXBContextResolver.class.getName());
private final Map<Class, JAXBContext> typesContextMap; private final Map<Class, JAXBContext> typesContextMap;
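
LoggerFactory.getLogger accepts either a Class or a String, so passing JAXBContextResolver.class.getName() as above yields the same logger category as passing the class itself. A sketch (name equality is guaranteed; instance identity merely holds for the usual caching bindings):

    Logger byClass = LoggerFactory.getLogger(JAXBContextResolver.class);
    Logger byName = LoggerFactory.getLogger(JAXBContextResolver.class.getName());
    // Same category either way:
    assert byClass.getName().equals(byName.getName());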

View File

@ -22,8 +22,6 @@ import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.C
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNotEquals;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.Resource;
@ -49,14 +47,16 @@ import org.apache.hadoop.yarn.util.resource.Resources;
import org.junit.Assert; import org.junit.Assert;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.atomic.AtomicReference;
public class TestCapacitySchedulerMultiNodesWithPreemption { public class TestCapacitySchedulerMultiNodesWithPreemption {
private static final Log LOG = LogFactory private static final Logger LOG =
.getLog(TestCapacitySchedulerMultiNodesWithPreemption.class); LoggerFactory.getLogger(TestCapacitySchedulerMultiNodesWithPreemption.class);
private CapacitySchedulerConfiguration conf; private CapacitySchedulerConfiguration conf;
private static final String POLICY_CLASS_NAME = private static final String POLICY_CLASS_NAME =
"org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement." "org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement."

View File

@ -104,6 +104,10 @@
<groupId>com.sun.jersey</groupId> <groupId>com.sun.jersey</groupId>
<artifactId>jersey-json</artifactId> <artifactId>jersey-json</artifactId>
</exclusion> </exclusion>
<exclusion>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</exclusion>
</exclusions> </exclusions>
</dependency> </dependency>
@ -336,6 +340,10 @@
<groupId>com.sun.jersey</groupId> <groupId>com.sun.jersey</groupId>
<artifactId>jersey-json</artifactId> <artifactId>jersey-json</artifactId>
</exclusion> </exclusion>
<exclusion>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</exclusion>
</exclusions> </exclusions>
</dependency> </dependency>
@ -351,6 +359,10 @@
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs-client</artifactId> <artifactId>hadoop-hdfs-client</artifactId>
</exclusion> </exclusion>
<exclusion>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</exclusion>
</exclusions> </exclusions>
</dependency> </dependency>
@ -367,6 +379,10 @@
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs-client</artifactId> <artifactId>hadoop-hdfs-client</artifactId>
</exclusion> </exclusion>
<exclusion>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</exclusion>
</exclusions> </exclusions>
</dependency> </dependency>

View File

@ -288,6 +288,13 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/x
<bannedImport>org.glassfish.grizzly.**</bannedImport> <bannedImport>org.glassfish.grizzly.**</bannedImport>
</bannedImports> </bannedImports>
</restrictImports> </restrictImports>
<restrictImports>
<includeTestCode>true</includeTestCode>
<reason>Use slf4j based Logger</reason>
<bannedImports>
<bannedImport>org.apache.commons.logging.**</bannedImport>
</bannedImports>
</restrictImports>
</rules> </rules>
</configuration> </configuration>
</execution> </execution>
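
This enforcer rule is the forward-looking half of the change: with includeTestCode set to true, any Java source (main or test) that reintroduces a commons-logging import now fails the build during the enforcer check. For example, a file containing either of these lines would be rejected:

    import org.apache.commons.logging.Log;        // banned by the rule above
    import org.apache.commons.logging.LogFactory; // banned by the rule above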