YARN-7243. Moving logging APIs over to slf4j in hadoop-yarn-server-resourcemanager.

Signed-off-by: Akira Ajisaka <aajisaka@apache.org>
This commit is contained in:
Authored by Prabhu Joseph on 2019-03-04 14:47:22 +05:30, committed by Akira Ajisaka
parent fe7551f21b
commit e40e2d6ad5
No known key found for this signature in database
GPG Key ID: C1EDBB9CA400FD50
211 changed files with 894 additions and 822 deletions

View File

@ -34,6 +34,7 @@ import java.util.Arrays;
import java.util.Locale;
import java.util.Random;
import java.util.Set;
import java.util.Enumeration;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
@ -191,6 +192,14 @@ public abstract class GenericTestUtils {
setLogLevel(LogManager.getRootLogger(), Level.toLevel(level.toString()));
}
/**
 * Sets the level of every currently-registered log4j logger to the
 * log4j equivalent of the given slf4j level.
 *
 * @param level the target slf4j level; converted by name to a log4j Level
 */
public static void setCurrentLoggersLogLevel(org.slf4j.event.Level level) {
  // Translate the slf4j level to its log4j counterpart once, up front.
  final Level log4jLevel = Level.toLevel(level.toString());
  final Enumeration<?> current = LogManager.getCurrentLoggers();
  while (current.hasMoreElements()) {
    ((Logger) current.nextElement()).setLevel(log4jLevel);
  }
}
/**
 * Parses a level name into an slf4j {@code Level}, using DEBUG as the
 * default passed to the two-argument overload.
 *
 * @param level the level name to parse
 * @return the parsed slf4j level, or the overload's fallback behavior
 */
public static org.slf4j.event.Level toLevel(String level) {
  final org.slf4j.event.Level fallback = org.slf4j.event.Level.DEBUG;
  return toLevel(level, fallback);
}

View File

@ -18,8 +18,6 @@
package org.apache.hadoop.yarn.server.resourcemanager;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.ams.ApplicationMasterServiceContext;
import org.apache.hadoop.yarn.ams.ApplicationMasterServiceProcessor;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
@ -31,6 +29,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRespo
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
@ -39,7 +39,8 @@ import java.io.IOException;
*/
class AMSProcessingChain implements ApplicationMasterServiceProcessor {
private static final Log LOG = LogFactory.getLog(AMSProcessingChain.class);
private static final Logger LOG =
LoggerFactory.getLogger(AMSProcessingChain.class);
private ApplicationMasterServiceProcessor head;
private RMContext rmContext;

View File

@ -19,8 +19,8 @@ package org.apache.hadoop.yarn.server.resourcemanager;
import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@ -52,8 +52,8 @@ import java.util.TimerTask;
public class ActiveStandbyElectorBasedElectorService extends AbstractService
implements EmbeddedElector,
ActiveStandbyElector.ActiveStandbyElectorCallback {
private static final Log LOG = LogFactory.getLog(
ActiveStandbyElectorBasedElectorService.class.getName());
private static final Logger LOG = LoggerFactory.
getLogger(ActiveStandbyElectorBasedElectorService.class.getName());
private static final HAServiceProtocol.StateChangeRequestInfo req =
new HAServiceProtocol.StateChangeRequestInfo(
HAServiceProtocol.RequestSource.REQUEST_BY_ZKFC);

View File

@ -29,8 +29,8 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@ -110,7 +110,8 @@ import com.google.protobuf.BlockingService;
public class AdminService extends CompositeService implements
HAServiceProtocol, ResourceManagerAdministrationProtocol {
private static final Log LOG = LogFactory.getLog(AdminService.class);
private static final Logger LOG =
LoggerFactory.getLogger(AdminService.class);
private final ResourceManager rm;
private String rmId;

View File

@ -27,8 +27,8 @@ import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@ -83,7 +83,8 @@ import com.google.common.annotations.VisibleForTesting;
@Private
public class ApplicationMasterService extends AbstractService implements
ApplicationMasterProtocol {
private static final Log LOG = LogFactory.getLog(ApplicationMasterService.class);
private static final Logger LOG = LoggerFactory.
getLogger(ApplicationMasterService.class);
private final AMLivelinessMonitor amLivelinessMonitor;
private YarnScheduler rScheduler;

View File

@ -40,8 +40,8 @@ import java.util.stream.Collectors;
import org.apache.commons.cli.UnrecognizedOptionException;
import org.apache.commons.lang3.Range;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@ -209,7 +209,8 @@ public class ClientRMService extends AbstractService implements
ApplicationClientProtocol {
private static final ArrayList<ApplicationReport> EMPTY_APPS_REPORT = new ArrayList<ApplicationReport>();
private static final Log LOG = LogFactory.getLog(ClientRMService.class);
private static final Logger LOG =
LoggerFactory.getLogger(ClientRMService.class);
final private AtomicInteger applicationCounter = new AtomicInteger(0);
final private YarnScheduler scheduler;

View File

@ -19,8 +19,8 @@
package org.apache.hadoop.yarn.server.resourcemanager;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.leader.LeaderLatch;
import org.apache.curator.framework.recipes.leader.LeaderLatchListener;
@ -41,8 +41,8 @@ import java.io.IOException;
@InterfaceStability.Unstable
public class CuratorBasedElectorService extends AbstractService
implements EmbeddedElector, LeaderLatchListener {
public static final Log LOG =
LogFactory.getLog(CuratorBasedElectorService.class);
public static final Logger LOG =
LoggerFactory.getLogger(CuratorBasedElectorService.class);
private LeaderLatch leaderLatch;
private CuratorFramework curator;
private String latchPath;

View File

@ -25,8 +25,8 @@ import java.util.Set;
import java.util.Timer;
import java.util.TimerTask;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerState;
@ -67,8 +67,8 @@ import org.apache.hadoop.yarn.util.MonotonicClock;
* (the affected map tasks will be rescheduled).
*/
public class DecommissioningNodesWatcher {
private static final Log LOG =
LogFactory.getLog(DecommissioningNodesWatcher.class);
private static final Logger LOG =
LoggerFactory.getLogger(DecommissioningNodesWatcher.class);
private final RMContext rmContext;

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.resourcemanager;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.ams.ApplicationMasterServiceContext;
import org.apache.hadoop.yarn.ams.ApplicationMasterServiceUtils;
@ -103,7 +103,8 @@ import static org.apache.hadoop.yarn.exceptions
*/
final class DefaultAMSProcessor implements ApplicationMasterServiceProcessor {
private static final Log LOG = LogFactory.getLog(DefaultAMSProcessor.class);
private static final Logger LOG =
LoggerFactory.getLogger(DefaultAMSProcessor.class);
private final static List<Container> EMPTY_CONTAINER_LIST =
new ArrayList<Container>();

View File

@ -31,8 +31,8 @@ import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.Node;
@ -66,7 +66,8 @@ import com.google.common.annotations.VisibleForTesting;
public class NodesListManager extends CompositeService implements
EventHandler<NodesListManagerEvent> {
private static final Log LOG = LogFactory.getLog(NodesListManager.class);
private static final Logger LOG =
LoggerFactory.getLogger(NodesListManager.class);
private HostsFileReader hostsReader;
private Configuration conf;

View File

@ -19,8 +19,8 @@
package org.apache.hadoop.yarn.server.resourcemanager;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
@ -95,8 +95,8 @@ public class OpportunisticContainerAllocatorAMService
extends ApplicationMasterService implements DistributedSchedulingAMProtocol,
EventHandler<SchedulerEvent> {
private static final Log LOG =
LogFactory.getLog(OpportunisticContainerAllocatorAMService.class);
private static final Logger LOG =
LoggerFactory.getLogger(OpportunisticContainerAllocatorAMService.class);
private final NodeQueueLoadMonitor nodeMonitor;
private final OpportunisticContainerAllocator oppContainerAllocator;

View File

@ -22,8 +22,8 @@ import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ApplicationId;
@ -70,8 +70,8 @@ import org.apache.hadoop.yarn.util.SystemClock;
@Unstable
public class RMActiveServiceContext {
private static final Log LOG = LogFactory
.getLog(RMActiveServiceContext.class);
private static final Logger LOG = LoggerFactory
.getLogger(RMActiveServiceContext.class);
private final ConcurrentMap<ApplicationId, RMApp> applications =
new ConcurrentHashMap<ApplicationId, RMApp>();

View File

@ -24,8 +24,8 @@ import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.security.AccessControlException;
@ -82,7 +82,8 @@ import org.apache.hadoop.yarn.util.StringHelper;
public class RMAppManager implements EventHandler<RMAppManagerEvent>,
Recoverable {
private static final Log LOG = LogFactory.getLog(RMAppManager.class);
private static final Logger LOG =
LoggerFactory.getLogger(RMAppManager.class);
private int maxCompletedAppsInMemory;
private int maxCompletedAppsInStateStore;
@ -124,7 +125,8 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
* This class is for logging the application summary.
*/
static class ApplicationSummary {
static final Log LOG = LogFactory.getLog(ApplicationSummary.class);
static final Logger LOG = LoggerFactory.
getLogger(ApplicationSummary.class);
// Escape sequences
static final char EQUALS = '=';
@ -214,7 +216,7 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
*/
public static void logAppSummary(RMApp app) {
if (app != null) {
LOG.info(createAppSummary(app));
LOG.info(createAppSummary(app).toString());
}
}
}

View File

@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.server.resourcemanager;
import java.io.UnsupportedEncodingException;
import java.net.InetAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.ipc.CallerContext;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@ -35,7 +35,8 @@ import org.apache.hadoop.yarn.api.records.Resource;
* Audit log format is written as key=value pairs. Tab separated.
*/
public class RMAuditLogger {
private static final Log LOG = LogFactory.getLog(RMAuditLogger.class);
private static final Logger LOG =
LoggerFactory.getLogger(RMAuditLogger.class);
enum Keys {USER, OPERATION, TARGET, RESULT, IP, PERMISSIONS,
DESCRIPTION, APPID, APPATTEMPTID, CONTAINERID,

View File

@ -22,8 +22,8 @@ import java.net.URI;
import java.net.URISyntaxException;
import java.util.concurrent.ConcurrentMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
@ -83,7 +83,8 @@ import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
*/
public class RMContextImpl implements RMContext {
private static final Log LOG = LogFactory.getLog(RMContextImpl.class);
private static final Logger LOG =
LoggerFactory.getLogger(RMContextImpl.class);
private static final String UNAVAILABLE = "N/A";
/**
* RM service contexts which runs through out RM life span. These are created

View File

@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.server.resourcemanager;
import java.lang.Thread.UncaughtExceptionHandler;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.yarn.exceptions.YarnException;
@ -35,7 +35,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
@Private
public class RMCriticalThreadUncaughtExceptionHandler
implements UncaughtExceptionHandler {
private static final Log LOG = LogFactory.getLog(
private static final Logger LOG = LoggerFactory.getLogger(
RMCriticalThreadUncaughtExceptionHandler.class);
private final RMContext rmContext;

View File

@ -27,8 +27,8 @@ import java.util.List;
import javax.management.NotCompliantMBeanException;
import javax.management.StandardMBean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
@ -39,7 +39,8 @@ import org.eclipse.jetty.util.ajax.JSON;
* JMX bean listing statuses of all node managers.
*/
public class RMNMInfo implements RMNMInfoBeans {
private static final Log LOG = LogFactory.getLog(RMNMInfo.class);
private static final Logger LOG =
LoggerFactory.getLogger(RMNMInfo.class);
private RMContext rmContext;
private ResourceScheduler scheduler;

View File

@ -31,8 +31,8 @@ import java.util.Map;
import java.util.Set;
import com.google.common.collect.Sets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
@ -89,7 +89,8 @@ import org.apache.hadoop.yarn.util.resource.Resources;
*/
public class RMServerUtils {
private static final Log LOG_HANDLE = LogFactory.getLog(RMServerUtils.class);
private static final Logger LOG_HANDLE =
LoggerFactory.getLogger(RMServerUtils.class);
public static final String UPDATE_OUTSTANDING_ERROR =
"UPDATE_OUTSTANDING_ERROR";
@ -377,7 +378,7 @@ public class RMServerUtils {
}
public static UserGroupInformation verifyAdminAccess(
YarnAuthorizationProvider authorizer, String method, final Log LOG)
YarnAuthorizationProvider authorizer, String method, final Logger LOG)
throws IOException {
// by default, this method will use AdminService as module name
return verifyAdminAccess(authorizer, method, "AdminService", LOG);
@ -396,7 +397,7 @@ public class RMServerUtils {
*/
public static UserGroupInformation verifyAdminAccess(
YarnAuthorizationProvider authorizer, String method, String module,
final Log LOG)
final Logger LOG)
throws IOException {
UserGroupInformation user;
try {

View File

@ -21,8 +21,10 @@ package org.apache.hadoop.yarn.server.resourcemanager;
import com.google.common.annotations.VisibleForTesting;
import com.sun.jersey.spi.container.servlet.ServletContainer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.Marker;
import org.slf4j.MarkerFactory;
import org.apache.curator.framework.AuthInfo;
import org.apache.curator.framework.CuratorFramework;
import org.apache.hadoop.classification.InterfaceAudience.Private;
@ -164,7 +166,9 @@ public class ResourceManager extends CompositeService
*/
public static final int EPOCH_BIT_SHIFT = 40;
private static final Log LOG = LogFactory.getLog(ResourceManager.class);
private static final Logger LOG =
LoggerFactory.getLogger(ResourceManager.class);
private static final Marker FATAL = MarkerFactory.getMarker("FATAL");
private static long clusterTimeStamp = System.currentTimeMillis();
/*
@ -950,15 +954,16 @@ public class ResourceManager extends CompositeService
// how depends on the event.
switch(event.getType()) {
case STATE_STORE_FENCED:
LOG.fatal("State store fenced even though the resource manager " +
"is not configured for high availability. Shutting down this " +
"resource manager to protect the integrity of the state store.");
LOG.error(FATAL, "State store fenced even though the resource " +
"manager is not configured for high availability. Shutting " +
"down this resource manager to protect the integrity of the " +
"state store.");
ExitUtil.terminate(1, event.getExplanation());
break;
case STATE_STORE_OP_FAILED:
if (YarnConfiguration.shouldRMFailFast(getConfig())) {
LOG.fatal("Shutting down the resource manager because a state " +
"store operation failed, and the resource manager is " +
LOG.error(FATAL, "Shutting down the resource manager because a " +
"state store operation failed, and the resource manager is " +
"configured to fail fast. See the yarn.fail-fast and " +
"yarn.resourcemanager.fail-fast properties.");
ExitUtil.terminate(1, event.getExplanation());
@ -970,7 +975,7 @@ public class ResourceManager extends CompositeService
}
break;
default:
LOG.fatal("Shutting down the resource manager.");
LOG.error(FATAL, "Shutting down the resource manager.");
ExitUtil.terminate(1, event.getExplanation());
}
}
@ -1019,7 +1024,7 @@ public class ResourceManager extends CompositeService
elector.rejoinElection();
}
} catch (Exception e) {
LOG.fatal("Failed to transition RM to Standby mode.", e);
LOG.error(FATAL, "Failed to transition RM to Standby mode.", e);
ExitUtil.terminate(1, e);
}
}
@ -1556,7 +1561,7 @@ public class ResourceManager extends CompositeService
resourceManager.start();
}
} catch (Throwable t) {
LOG.fatal("Error starting ResourceManager", t);
LOG.error(FATAL, "Error starting ResourceManager", t);
System.exit(-1);
}
}

View File

@ -33,8 +33,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import org.apache.commons.collections.CollectionUtils;
import com.google.common.collect.ImmutableMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.ipc.Server;
@ -98,7 +98,8 @@ import com.google.common.annotations.VisibleForTesting;
public class ResourceTrackerService extends AbstractService implements
ResourceTracker {
private static final Log LOG = LogFactory.getLog(ResourceTrackerService.class);
private static final Logger LOG =
LoggerFactory.getLogger(ResourceTrackerService.class);
private static final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
@ -881,7 +882,7 @@ public class ResourceTrackerService extends AbstractService implements
.append("} reported from NM with ID ").append(nodeId)
.append(" was rejected from RM with exception message as : ")
.append(ex.getMessage());
LOG.error(errorMessage, ex);
LOG.error(errorMessage.toString(), ex);
throw new IOException(errorMessage.toString(), ex);
}
}

View File

@ -22,8 +22,8 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
@ -69,8 +69,8 @@ import com.google.common.annotations.VisibleForTesting;
@Unstable
public class RMApplicationHistoryWriter extends CompositeService {
public static final Log LOG = LogFactory
.getLog(RMApplicationHistoryWriter.class);
public static final Logger LOG =
LoggerFactory.getLogger(RMApplicationHistoryWriter.class);
private Dispatcher dispatcher;
@VisibleForTesting

View File

@ -26,8 +26,8 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputByteBuffer;
@ -75,7 +75,8 @@ import com.google.common.annotations.VisibleForTesting;
*/
public class AMLauncher implements Runnable {
private static final Log LOG = LogFactory.getLog(AMLauncher.class);
private static final Logger LOG =
LoggerFactory.getLogger(AMLauncher.class);
private ContainerManagementProtocol containerMgrProxy;

View File

@ -24,8 +24,8 @@ import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.service.AbstractService;
@ -37,7 +37,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
public class ApplicationMasterLauncher extends AbstractService implements
EventHandler<AMLauncherEvent> {
private static final Log LOG = LogFactory.getLog(
private static final Logger LOG = LoggerFactory.getLogger(
ApplicationMasterLauncher.class);
private ThreadPoolExecutor launcherPool;
private LauncherThread launcherHandlingThread;

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.resourcemanager.blacklist;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
import java.util.ArrayList;
@ -40,7 +40,8 @@ public class SimpleBlacklistManager implements BlacklistManager {
private final Set<String> blacklistNodes = new HashSet<>();
private static final ArrayList<String> EMPTY_LIST = new ArrayList<>();
private static final Log LOG = LogFactory.getLog(SimpleBlacklistManager.class);
private static final Logger LOG =
LoggerFactory.getLogger(SimpleBlacklistManager.class);
public SimpleBlacklistManager(int numberOfNodeManagerHosts,
double blacklistDisableFailureThreshold) {

View File

@ -21,8 +21,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.metrics;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
@ -51,8 +51,8 @@ import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
*/
public class TimelineServiceV1Publisher extends AbstractSystemMetricsPublisher {
private static final Log LOG =
LogFactory.getLog(TimelineServiceV1Publisher.class);
private static final Logger LOG =
LoggerFactory.getLogger(TimelineServiceV1Publisher.class);
public TimelineServiceV1Publisher() {
super("TimelineserviceV1Publisher");

View File

@ -24,8 +24,8 @@ import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
@ -71,8 +71,8 @@ import com.google.common.annotations.VisibleForTesting;
@Private
@Unstable
public class TimelineServiceV2Publisher extends AbstractSystemMetricsPublisher {
private static final Log LOG =
LogFactory.getLog(TimelineServiceV2Publisher.class);
private static final Logger LOG =
LoggerFactory.getLogger(TimelineServiceV2Publisher.class);
private RMTimelineCollectorManager rmTimelineCollectorManager;
private boolean publishContainerEvents;

View File

@ -23,8 +23,8 @@ import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
@ -34,7 +34,8 @@ import com.google.common.annotations.VisibleForTesting;
public class SchedulingMonitor extends AbstractService {
private final SchedulingEditPolicy scheduleEditPolicy;
private static final Log LOG = LogFactory.getLog(SchedulingMonitor.class);
private static final Logger LOG =
LoggerFactory.getLogger(SchedulingMonitor.class);
// ScheduledExecutorService which schedules the PreemptionChecker to run
// periodically.

View File

@ -19,8 +19,8 @@
package org.apache.hadoop.yarn.server.resourcemanager.monitor;
import com.google.common.collect.Sets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@ -36,7 +36,7 @@ import java.util.Set;
* Manages scheduling monitors.
*/
public class SchedulingMonitorManager {
private static final Log LOG = LogFactory.getLog(
private static final Logger LOG = LoggerFactory.getLogger(
SchedulingMonitorManager.class);
private Map<String, SchedulingMonitor> runningSchedulingMonitors =

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
@ -40,8 +40,8 @@ import java.util.TreeSet;
public class FifoCandidatesSelector
extends PreemptionCandidatesSelector {
private static final Log LOG =
LogFactory.getLog(FifoCandidatesSelector.class);
private static final Logger LOG =
LoggerFactory.getLogger(FifoCandidatesSelector.class);
private PreemptableResourceCalculator preemptableAmountCalculator;
private boolean allowQueuesBalanceAfterAllQueuesSatisfied;

View File

@ -30,8 +30,8 @@ import java.util.PriorityQueue;
import java.util.Set;
import java.util.TreeSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.IntraQueueCandidatesSelector.TAFairOrderingComparator;
@ -58,8 +58,8 @@ public class FifoIntraQueuePreemptionPlugin
protected final CapacitySchedulerPreemptionContext context;
protected final ResourceCalculator rc;
private static final Log LOG =
LogFactory.getLog(FifoIntraQueuePreemptionPlugin.class);
private static final Logger LOG =
LoggerFactory.getLogger(FifoIntraQueuePreemptionPlugin.class);
public FifoIntraQueuePreemptionPlugin(ResourceCalculator rc,
CapacitySchedulerPreemptionContext preemptionContext) {
@ -185,7 +185,7 @@ public class FifoIntraQueuePreemptionPlugin
if (LOG.isDebugEnabled()) {
LOG.debug("Queue Name:" + tq.queueName + ", partition:" + tq.partition);
for (TempAppPerPartition tmpApp : tq.getApps()) {
LOG.debug(tmpApp);
LOG.debug(tmpApp.toString());
}
}
}

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
@ -107,8 +107,8 @@ public class IntraQueueCandidatesSelector extends PreemptionCandidatesSelector {
IntraQueuePreemptionComputePlugin fifoPreemptionComputePlugin = null;
final CapacitySchedulerPreemptionContext context;
private static final Log LOG =
LogFactory.getLog(IntraQueueCandidatesSelector.class);
private static final Logger LOG =
LoggerFactory.getLogger(IntraQueueCandidatesSelector.class);
IntraQueueCandidatesSelector(
CapacitySchedulerPreemptionContext preemptionContext) {

View File

@ -23,8 +23,8 @@ import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
@ -38,8 +38,8 @@ import org.apache.hadoop.yarn.util.resource.Resources;
public class PreemptableResourceCalculator
extends
AbstractPreemptableResourceCalculator {
private static final Log LOG =
LogFactory.getLog(PreemptableResourceCalculator.class);
private static final Logger LOG =
LoggerFactory.getLogger(PreemptableResourceCalculator.class);
/**
* PreemptableResourceCalculator constructor
@ -217,10 +217,7 @@ public class PreemptableResourceCalculator
} else {
qT.setActuallyToBePreempted(Resources.none());
}
if (LOG.isDebugEnabled()) {
LOG.debug(qT);
}
LOG.debug("{}", qT);
}
}
}

View File

@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableSet;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@ -94,8 +94,8 @@ public class ProportionalCapacityPreemptionPolicy
PRIORITY_FIRST, USERLIMIT_FIRST;
}
private static final Log LOG =
LogFactory.getLog(ProportionalCapacityPreemptionPolicy.class);
private static final Logger LOG =
LoggerFactory.getLogger(ProportionalCapacityPreemptionPolicy.class);
private final Clock clock;

View File

@ -21,8 +21,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
@ -45,8 +45,8 @@ import java.util.Set;
public class QueuePriorityContainerCandidateSelector
extends PreemptionCandidatesSelector {
private static final Log LOG =
LogFactory.getLog(QueuePriorityContainerCandidateSelector.class);
private static final Logger LOG =
LoggerFactory.getLogger(QueuePriorityContainerCandidateSelector.class);
// Configured timeout before doing reserved container preemption
private long minTimeout;

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;
@ -37,8 +37,8 @@ import java.util.Set;
public class ReservedContainerCandidatesSelector
extends PreemptionCandidatesSelector {
private static final Log LOG =
LogFactory.getLog(ReservedContainerCandidatesSelector.class);
private static final Logger LOG =
LoggerFactory.getLogger(ReservedContainerCandidatesSelector.class);
private PreemptableResourceCalculator preemptableAmountCalculator;

View File

@ -17,8 +17,8 @@
*/
package org.apache.hadoop.yarn.server.resourcemanager.nodelabels;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
@ -43,8 +43,8 @@ public class FileSystemNodeAttributeStore
extends AbstractFSNodeStore<NodeAttributesManager>
implements NodeAttributeStore {
protected static final Log LOG =
LogFactory.getLog(FileSystemNodeAttributeStore.class);
protected static final Logger LOG =
LoggerFactory.getLogger(FileSystemNodeAttributeStore.class);
protected static final String DEFAULT_DIR_NAME = "node-attribute";
protected static final String MIRROR_FILENAME = "nodeattribute.mirror";

View File

@ -37,8 +37,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import com.google.common.base.Strings;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.api.records.NodeAttribute;
@ -67,8 +67,8 @@ import com.google.common.base.Strings;
* Manager holding the attributes to Labels.
*/
public class NodeAttributesManagerImpl extends NodeAttributesManager {
protected static final Log LOG =
LogFactory.getLog(NodeAttributesManagerImpl.class);
protected static final Logger LOG =
LoggerFactory.getLogger(NodeAttributesManagerImpl.class);
/**
* If a user doesn't specify value for a label, then empty string is
* considered as default.
@ -200,10 +200,7 @@ public class NodeAttributesManagerImpl extends NodeAttributesManager {
.append(StringUtils.join(entry.getValue().keySet(), ","))
.append("] ,");
}
if (LOG.isDebugEnabled()) {
LOG.debug(logMsg);
}
LOG.debug("{}", logMsg);
if (null != dispatcher && NodeAttribute.PREFIX_CENTRALIZED
.equals(attributePrefix)) {

View File

@ -23,8 +23,8 @@ import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.NodeAttribute;
import org.apache.hadoop.yarn.api.records.NodeLabel;
@ -32,7 +32,8 @@ import org.apache.hadoop.yarn.api.records.NodeLabel;
* Node labels utilities.
*/
public final class NodeLabelsUtils {
private static final Log LOG = LogFactory.getLog(NodeLabelsUtils.class);
private static final Logger LOG =
LoggerFactory.getLogger(NodeLabelsUtils.class);
private NodeLabelsUtils() { /* Hidden constructor */ }

View File

@ -27,8 +27,8 @@ import java.util.Set;
import java.util.Timer;
import java.util.TimerTask;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.yarn.api.records.NodeId;
@ -47,8 +47,8 @@ import com.google.common.annotations.VisibleForTesting;
*/
public class RMDelegatedNodeLabelsUpdater extends CompositeService {
private static final Log LOG = LogFactory
.getLog(RMDelegatedNodeLabelsUpdater.class);
private static final Logger LOG = LoggerFactory
.getLogger(RMDelegatedNodeLabelsUpdater.class);
public static final long DISABLE_DELEGATED_NODE_LABELS_UPDATE = -1;

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.resourcemanager.placement;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
@ -41,8 +41,8 @@ import static org.apache.hadoop.yarn.server.resourcemanager.placement.QueuePlace
import static org.apache.hadoop.yarn.server.resourcemanager.placement.QueuePlacementRuleUtils.validateAndGetQueueMapping;
public class AppNameMappingPlacementRule extends PlacementRule {
private static final Log LOG = LogFactory
.getLog(AppNameMappingPlacementRule.class);
private static final Logger LOG = LoggerFactory
.getLogger(AppNameMappingPlacementRule.class);
public static final String CURRENT_APP_MAPPING = "%application";

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.resourcemanager.placement;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
@ -28,7 +28,8 @@ import org.apache.hadoop.util.ReflectionUtils;
*/
public final class PlacementFactory {
private static final Log LOG = LogFactory.getLog(PlacementFactory.class);
private static final Logger LOG =
LoggerFactory.getLogger(PlacementFactory.class);
private PlacementFactory() {
// Unused.

View File

@ -23,15 +23,16 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.exceptions.YarnException;
import com.google.common.annotations.VisibleForTesting;
public class PlacementManager {
private static final Log LOG = LogFactory.getLog(PlacementManager.class);
private static final Logger LOG =
LoggerFactory.getLogger(PlacementManager.class);
List<PlacementRule> rules;
ReadLock readLock;

View File

@ -23,8 +23,8 @@ import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.yarn.api.records.ApplicationId;
@ -47,8 +47,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ManagedP
import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.DOT;
public class UserGroupMappingPlacementRule extends PlacementRule {
private static final Log LOG = LogFactory
.getLog(UserGroupMappingPlacementRule.class);
private static final Logger LOG = LoggerFactory
.getLogger(UserGroupMappingPlacementRule.class);
public static final String CURRENT_USER_MAPPING = "%user";

View File

@ -31,8 +31,8 @@ import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
@ -86,7 +86,8 @@ import com.google.common.annotations.VisibleForTesting;
*/
public class FileSystemRMStateStore extends RMStateStore {
public static final Log LOG = LogFactory.getLog(FileSystemRMStateStore.class);
public static final Logger LOG =
LoggerFactory.getLogger(FileSystemRMStateStore.class);
protected static final String ROOT_DIR_NAME = "FSRMStateRoot";
protected static final Version CURRENT_VERSION_INFO = Version
@ -818,7 +819,7 @@ public class FileSystemRMStateStore extends RMStateStore {
fsIn.readFully(data);
return data;
} finally {
IOUtils.cleanup(LOG, fsIn);
IOUtils.cleanupWithLogger(LOG, fsIn);
}
}
@ -852,7 +853,7 @@ public class FileSystemRMStateStore extends RMStateStore {
fsOut = null;
fs.rename(tempPath, outputPath);
} finally {
IOUtils.cleanup(LOG, fsOut);
IOUtils.cleanupWithLogger(LOG, fsOut);
}
}

View File

@ -34,8 +34,8 @@ import java.util.Map.Entry;
import java.util.Timer;
import java.util.TimerTask;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@ -80,8 +80,8 @@ import com.google.common.annotations.VisibleForTesting;
*/
public class LeveldbRMStateStore extends RMStateStore {
public static final Log LOG =
LogFactory.getLog(LeveldbRMStateStore.class);
public static final Logger LOG =
LoggerFactory.getLogger(LeveldbRMStateStore.class);
private static final String SEPARATOR = "/";
private static final String DB_NAME = "yarn-rm-state";
@ -377,7 +377,7 @@ public class LeveldbRMStateStore extends RMStateStore {
try {
key.readFields(in);
} finally {
IOUtils.cleanup(LOG, in);
IOUtils.cleanupWithLogger(LOG, in);
}
return key;
}
@ -423,7 +423,7 @@ public class LeveldbRMStateStore extends RMStateStore {
try {
tokenData = RMStateStoreUtils.readRMDelegationTokenIdentifierData(in);
} finally {
IOUtils.cleanup(LOG, in);
IOUtils.cleanupWithLogger(LOG, in);
}
return tokenData;
}
@ -441,7 +441,7 @@ public class LeveldbRMStateStore extends RMStateStore {
try {
state.rmSecretManagerState.dtSequenceNumber = in.readInt();
} finally {
IOUtils.cleanup(LOG, in);
IOUtils.cleanupWithLogger(LOG, in);
}
}
}

View File

@ -42,8 +42,8 @@ import javax.crypto.SecretKey;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.SettableFuture;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
@ -121,7 +121,8 @@ public abstract class RMStateStore extends AbstractService {
private final ReadLock readLock;
private final WriteLock writeLock;
public static final Log LOG = LogFactory.getLog(RMStateStore.class);
public static final Logger LOG =
LoggerFactory.getLogger(RMStateStore.class);
/**
* The enum defines state of RMStateStore.

View File

@ -17,14 +17,15 @@
*/
package org.apache.hadoop.yarn.server.resourcemanager.recovery;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
public class RMStateStoreFactory {
private static final Log LOG = LogFactory.getLog(RMStateStoreFactory.class);
private static final Logger LOG =
LoggerFactory.getLogger(RMStateStoreFactory.class);
public static RMStateStore getStore(Configuration conf) {
Class<? extends RMStateStore> storeClass =

View File

@ -19,8 +19,8 @@
package org.apache.hadoop.yarn.server.resourcemanager.recovery;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
@ -37,7 +37,8 @@ import java.io.IOException;
@Unstable
public class RMStateStoreUtils {
public static final Log LOG = LogFactory.getLog(RMStateStoreUtils.class);
public static final Logger LOG =
LoggerFactory.getLogger(RMStateStoreUtils.class);
/**
* Returns the RM Delegation Token data from the {@link DataInputStream} as a

View File

@ -19,8 +19,8 @@
package org.apache.hadoop.yarn.server.resourcemanager.recovery;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.curator.framework.CuratorFramework;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
@ -178,7 +178,8 @@ import java.util.Set;
@Private
@Unstable
public class ZKRMStateStore extends RMStateStore {
private static final Log LOG = LogFactory.getLog(ZKRMStateStore.class);
private static final Logger LOG =
LoggerFactory.getLogger(ZKRMStateStore.class);
private static final String RM_DELEGATION_TOKENS_ROOT_ZNODE_NAME =
"RMDelegationTokensRoot";

View File

@ -22,8 +22,8 @@ import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.io.DataInputByteBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IOUtils;
@ -45,8 +45,8 @@ import com.google.protobuf.TextFormat;
public class ApplicationAttemptStateDataPBImpl extends
ApplicationAttemptStateData {
private static Log LOG =
LogFactory.getLog(ApplicationAttemptStateDataPBImpl.class);
private static final Logger LOG =
LoggerFactory.getLogger(ApplicationAttemptStateDataPBImpl.class);
ApplicationAttemptStateDataProto proto =
ApplicationAttemptStateDataProto.getDefaultInstance();
ApplicationAttemptStateDataProto.Builder builder = null;

View File

@ -23,8 +23,8 @@ import java.util.List;
import java.util.Set;
import java.util.TreeSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.ReservationDefinition;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.Plan;
@ -48,8 +48,8 @@ import com.google.common.annotations.VisibleForTesting;
*/
public class SimpleCapacityReplanner implements Planner {
private static final Log LOG = LogFactory
.getLog(SimpleCapacityReplanner.class);
private static final Logger LOG = LoggerFactory
.getLogger(SimpleCapacityReplanner.class);
private static final Resource ZERO_RESOURCE = Resource.newInstance(0, 0);

View File

@ -23,8 +23,8 @@ import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.StringUtils;
@ -37,8 +37,8 @@ import org.apache.hadoop.yarn.util.resource.Resources;
public class DynamicResourceConfiguration extends Configuration {
private static final Log LOG =
LogFactory.getLog(DynamicResourceConfiguration.class);
private static final Logger LOG =
LoggerFactory.getLogger(DynamicResourceConfiguration.class);
@Private
public static final String PREFIX = "yarn.resource.dynamic.";

View File

@ -19,8 +19,8 @@
package org.apache.hadoop.yarn.server.resourcemanager.resource;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceInformation;
@ -47,8 +47,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
*/
public class ResourceProfilesManagerImpl implements ResourceProfilesManager {
private static final Log LOG =
LogFactory.getLog(ResourceProfilesManagerImpl.class);
private static final Logger LOG =
LoggerFactory.getLogger(ResourceProfilesManagerImpl.class);
private final Map<String, Resource> profiles = new ConcurrentHashMap<>();
private Configuration conf;

View File

@ -37,8 +37,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.CallerContext;
@ -120,7 +120,8 @@ import com.google.common.annotations.VisibleForTesting;
@SuppressWarnings({ "rawtypes", "unchecked" })
public class RMAppImpl implements RMApp, Recoverable {
private static final Log LOG = LogFactory.getLog(RMAppImpl.class);
private static final Logger LOG =
LoggerFactory.getLogger(RMAppImpl.class);
private static final String UNAVAILABLE = "N/A";
private static final String UNLIMITED = "UNLIMITED";
private static final long UNKNOWN = -1L;

View File

@ -36,8 +36,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import javax.crypto.SecretKey;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
@ -127,7 +127,8 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
private static final String DIAGNOSTIC_LIMIT_CONFIG_ERROR_MESSAGE =
"The value of %s should be a positive integer: %s";
private static final Log LOG = LogFactory.getLog(RMAppAttemptImpl.class);
private static final Logger LOG =
LoggerFactory.getLogger(RMAppAttemptImpl.class);
private static final RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);

View File

@ -29,8 +29,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import org.apache.commons.lang3.time.DateUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
import org.apache.hadoop.yarn.api.records.Resource;
@ -42,7 +42,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
import org.apache.hadoop.yarn.util.resource.Resources;
public class RMAppAttemptMetrics {
private static final Log LOG = LogFactory.getLog(RMAppAttemptMetrics.class);
private static final Logger LOG =
LoggerFactory.getLogger(RMAppAttemptMetrics.class);
private ApplicationAttemptId attemptId = null;
// preemption info

View File

@ -22,8 +22,8 @@ import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
@ -42,7 +42,8 @@ import org.apache.hadoop.yarn.util.SystemClock;
public class RMAppLifetimeMonitor
extends AbstractLivelinessMonitor<RMAppToMonitor> {
private static final Log LOG = LogFactory.getLog(RMAppLifetimeMonitor.class);
private static final Logger LOG =
LoggerFactory.getLogger(RMAppLifetimeMonitor.class);
private RMContext rmContext;

View File

@ -28,8 +28,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
@ -68,7 +68,8 @@ import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
@SuppressWarnings({"unchecked", "rawtypes"})
public class RMContainerImpl implements RMContainer {
private static final Log LOG = LogFactory.getLog(RMContainerImpl.class);
private static final Logger LOG =
LoggerFactory.getLogger(RMContainerImpl.class);
private static final StateMachineFactory<RMContainerImpl, RMContainerState,
RMContainerEventType, RMContainerEvent>

View File

@ -36,8 +36,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import org.apache.commons.collections.keyvalue.DefaultMapEntry;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.net.Node;
@ -104,7 +104,8 @@ import com.google.common.annotations.VisibleForTesting;
@SuppressWarnings("unchecked")
public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
private static final Log LOG = LogFactory.getLog(RMNodeImpl.class);
private static final Logger LOG =
LoggerFactory.getLogger(RMNodeImpl.class);
private static final RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);

View File

@ -34,8 +34,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
@ -92,9 +92,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ContainerRequest;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.QueueEntitlement;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ReleaseContainerEvent;
import org.apache.hadoop.yarn.server.scheduler.OpportunisticContainerContext;
import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
@ -116,7 +113,8 @@ public abstract class AbstractYarnScheduler
<T extends SchedulerApplicationAttempt, N extends SchedulerNode>
extends AbstractService implements ResourceScheduler {
private static final Log LOG = LogFactory.getLog(AbstractYarnScheduler.class);
private static final Logger LOG =
LoggerFactory.getLogger(AbstractYarnScheduler.class);
protected final ClusterNodeTracker<N> nodeTracker =
new ClusterNodeTracker<>();
@ -775,7 +773,7 @@ public abstract class AbstractYarnScheduler
try {
getQueueInfo(destQueue, false, false);
} catch (IOException e) {
LOG.warn(e);
LOG.warn(e.toString());
throw new YarnException(e);
}
// check if source queue is a valid

View File

@ -22,8 +22,8 @@ import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.server.utils.Lock;
@ -38,7 +38,8 @@ import org.apache.hadoop.yarn.server.utils.Lock;
@Private
public class ActiveUsersManager implements AbstractUsersManager {
private static final Log LOG = LogFactory.getLog(ActiveUsersManager.class);
private static final Logger LOG =
LoggerFactory.getLogger(ActiveUsersManager.class);
private final QueueMetrics metrics;

View File

@ -31,8 +31,8 @@ import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@ -63,7 +63,8 @@ import org.apache.hadoop.yarn.util.resource.Resources;
@Unstable
public class AppSchedulingInfo {
private static final Log LOG = LogFactory.getLog(AppSchedulingInfo.class);
private static final Logger LOG =
LoggerFactory.getLogger(AppSchedulingInfo.class);
private final ApplicationId applicationId;
private final ApplicationAttemptId applicationAttemptId;

View File

@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
@ -50,7 +50,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
*/
@InterfaceAudience.Private
public class ClusterNodeTracker<N extends SchedulerNode> {
private static final Log LOG = LogFactory.getLog(ClusterNodeTracker.class);
private static final Logger LOG =
LoggerFactory.getLogger(ClusterNodeTracker.class);
private ReadWriteLock readWriteLock = new ReentrantReadWriteLock(true);
private Lock readLock = readWriteLock.readLock();

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@ -30,7 +30,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
*/
public final class ConfigurationMutationACLPolicyFactory {
private static final Log LOG = LogFactory.getLog(
private static final Logger LOG = LoggerFactory.getLogger(
ConfigurationMutationACLPolicyFactory.class);
private ConfigurationMutationACLPolicyFactory() {

View File

@ -17,8 +17,8 @@
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.QueueState;
@ -38,7 +38,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSche
public class QueueStateManager<T extends SchedulerQueue,
E extends ReservationSchedulerConfiguration> {
private static final Log LOG = LogFactory.getLog(QueueStateManager.class);
private static final Logger LOG =
LoggerFactory.getLogger(QueueStateManager.class);
private SchedulerQueueManager<T, E> queueManager;

View File

@ -18,12 +18,13 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import org.apache.commons.logging.Log;
import org.slf4j.Logger;
public class SchedulerAppUtils {
public static boolean isPlaceBlacklisted(
SchedulerApplicationAttempt application, SchedulerNode node, Log log) {
SchedulerApplicationAttempt application, SchedulerNode node,
Logger log) {
if (application.isPlaceBlacklisted(node.getNodeName())) {
if (log.isDebugEnabled()) {
log.debug("Skipping 'host' " + node.getNodeName() +

View File

@ -36,8 +36,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.apache.commons.lang3.time.FastDateFormat;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
@ -100,8 +100,8 @@ import com.google.common.collect.ConcurrentHashMultiset;
@Unstable
public class SchedulerApplicationAttempt implements SchedulableEntity {
private static final Log LOG = LogFactory
.getLog(SchedulerApplicationAttempt.class);
private static final Logger LOG = LoggerFactory
.getLogger(SchedulerApplicationAttempt.class);
private FastDateFormat fdf =
FastDateFormat.getInstance("EEE MMM dd HH:mm:ss Z yyyy");

View File

@ -26,8 +26,8 @@ import java.util.Map;
import java.util.Set;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.util.Time;
@ -58,7 +58,8 @@ import com.google.common.collect.ImmutableSet;
@Unstable
public abstract class SchedulerNode {
private static final Log LOG = LogFactory.getLog(SchedulerNode.class);
private static final Logger LOG =
LoggerFactory.getLogger(SchedulerNode.class);
private Resource unallocatedResource = Resource.newInstance(0, 0);
private Resource allocatedResource = Resource.newInstance(0, 0);

View File

@ -27,8 +27,8 @@ import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
@ -104,7 +104,8 @@ public class SchedulerUtils {
}
}
private static final Log LOG = LogFactory.getLog(SchedulerUtils.class);
private static final Logger LOG =
LoggerFactory.getLogger(SchedulerUtils.class);
private static final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
@ -34,7 +34,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaS
*/
// FIXME: make sure CandidateNodeSet works with this class
public class ActivitiesLogger {
private static final Log LOG = LogFactory.getLog(ActivitiesLogger.class);
private static final Logger LOG =
LoggerFactory.getLogger(ActivitiesLogger.class);
/**
* Methods for recording activities from an app

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
@ -44,7 +44,8 @@ import java.util.ArrayList;
* It mainly contains operations for allocation start, add, update and finish.
*/
public class ActivitiesManager extends AbstractService {
private static final Log LOG = LogFactory.getLog(ActivitiesManager.class);
private static final Logger LOG =
LoggerFactory.getLogger(ActivitiesManager.class);
private ConcurrentMap<NodeId, List<NodeAllocation>> recordingNodesAllocation;
private ConcurrentMap<NodeId, List<NodeAllocation>> completedNodeAllocations;
private Set<NodeId> activeRecordedNodes;

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/*
* It records an activity operation in allocation,
@ -34,7 +34,8 @@ public class AllocationActivity {
private ActivityState state;
private String diagnostic = null;
private static final Log LOG = LogFactory.getLog(AllocationActivity.class);
private static final Logger LOG =
LoggerFactory.getLogger(AllocationActivity.class);
public AllocationActivity(String parentName, String queueName,
String priority, ActivityState state, String diagnostic, String type) {

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
@ -46,7 +46,8 @@ public class NodeAllocation {
private ActivityNode root = null;
private static final Log LOG = LogFactory.getLog(NodeAllocation.class);
private static final Logger LOG =
LoggerFactory.getLogger(NodeAllocation.class);
public NodeAllocation(NodeId nodeId) {
this.nodeId = nodeId;

View File

@ -29,8 +29,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.security.AccessControlException;
@ -79,7 +79,8 @@ import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.C
public abstract class AbstractCSQueue implements CSQueue {
private static final Log LOG = LogFactory.getLog(AbstractCSQueue.class);
private static final Logger LOG =
LoggerFactory.getLogger(AbstractCSQueue.class);
volatile CSQueue parent;
final String queueName;
private final String queuePath;

View File

@ -23,8 +23,8 @@ import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.util.StringUtils;
@ -37,8 +37,8 @@ import org.apache.hadoop.yarn.api.records.Priority;
*/
public class AppPriorityACLConfigurationParser {
private static final Log LOG = LogFactory
.getLog(AppPriorityACLConfigurationParser.class);
private static final Logger LOG = LoggerFactory
.getLogger(AppPriorityACLConfigurationParser.class);
public enum AppPriorityACLKeyType {
USER(1), GROUP(2), MAX_PRIORITY(3), DEFAULT_PRIORITY(4);

View File

@ -35,8 +35,10 @@ import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.Marker;
import org.slf4j.MarkerFactory;
import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configurable;
@ -174,7 +176,10 @@ public class CapacityScheduler extends
PreemptableResourceScheduler, CapacitySchedulerContext, Configurable,
ResourceAllocationCommitter, MutableConfScheduler {
private static final Log LOG = LogFactory.getLog(CapacityScheduler.class);
private static final Marker FATAL =
MarkerFactory.getMarker("FATAL");
private static final Logger LOG =
LoggerFactory.getLogger(CapacityScheduler.class);
private CapacitySchedulerQueueManager queueManager;
@ -660,7 +665,7 @@ public class CapacityScheduler extends
}
} catch (InterruptedException e) {
LOG.error(e);
LOG.error(e.toString());
Thread.currentThread().interrupt();
}
}
@ -814,7 +819,7 @@ public class CapacityScheduler extends
+ "supported by the capacity scheduler, please "
+ "restart with all queues configured"
+ " which were present before shutdown/restart.";
LOG.fatal(queueErrorMsg);
LOG.error(FATAL, queueErrorMsg);
throw new QueueInvalidException(queueErrorMsg);
}
}
@ -835,7 +840,7 @@ public class CapacityScheduler extends
+ " not presently supported by the capacity scheduler. Please"
+ " restart with leaf queues before shutdown/restart continuing"
+ " as leaf queues.";
LOG.fatal(queueErrorMsg);
LOG.error(FATAL, queueErrorMsg);
throw new QueueInvalidException(queueErrorMsg);
}
}
@ -892,7 +897,7 @@ public class CapacityScheduler extends
String queueErrorMsg =
"Queue named " + queueName + " could not be "
+ "auto-created during application recovery.";
LOG.fatal(queueErrorMsg, e);
LOG.error(FATAL, queueErrorMsg, e);
throw new QueueInvalidException(queueErrorMsg);
}
} else{

View File

@ -21,8 +21,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authorize.AccessControlList;
@ -74,8 +74,8 @@ import java.util.StringTokenizer;
public class CapacitySchedulerConfiguration extends ReservationSchedulerConfiguration {
private static final Log LOG =
LogFactory.getLog(CapacitySchedulerConfiguration.class);
private static final Logger LOG =
LoggerFactory.getLogger(CapacitySchedulerConfiguration.class);
private static final String CS_CONFIGURATION_FILE = "capacity-scheduler.xml";

View File

@ -28,8 +28,8 @@ import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
@ -63,7 +63,7 @@ import com.google.common.annotations.VisibleForTesting;
public class CapacitySchedulerQueueManager implements SchedulerQueueManager<
CSQueue, CapacitySchedulerConfiguration>{
private static final Log LOG = LogFactory.getLog(
private static final Logger LOG = LoggerFactory.getLogger(
CapacitySchedulerQueueManager.class);
static final Comparator<CSQueue> NON_PARTITIONED_QUEUE_COMPARATOR =

View File

@ -25,8 +25,8 @@ import java.util.concurrent.ConcurrentHashMap;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.security.AccessControlException;
@ -81,7 +81,8 @@ import com.google.common.collect.Sets;
@Private
@Unstable
public class LeafQueue extends AbstractCSQueue {
private static final Log LOG = LogFactory.getLog(LeafQueue.class);
private static final Logger LOG =
LoggerFactory.getLogger(LeafQueue.class);
private float absoluteUsedCapacity = 0.0f;

View File

@ -28,8 +28,8 @@ import java.util.Map;
import java.util.Set;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.security.AccessControlException;
@ -78,7 +78,8 @@ import org.apache.hadoop.yarn.util.resource.Resources;
@Evolving
public class ParentQueue extends AbstractCSQueue {
private static final Log LOG = LogFactory.getLog(ParentQueue.class);
private static final Logger LOG =
LoggerFactory.getLogger(ParentQueue.class);
protected final List<CSQueue> childQueues;
private final boolean rootQueue;

View File

@ -29,8 +29,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Resource;
@ -50,7 +50,8 @@ import com.google.common.annotations.VisibleForTesting;
@Private
public class UsersManager implements AbstractUsersManager {
private static final Log LOG = LogFactory.getLog(UsersManager.class);
private static final Logger LOG =
LoggerFactory.getLogger(UsersManager.class);
/*
* Member declaration for UsersManager class.

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.allocator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
@ -43,7 +43,8 @@ import org.apache.hadoop.yarn.util.resource.Resources;
* extensible.
*/
public abstract class AbstractContainerAllocator {
private static final Log LOG = LogFactory.getLog(AbstractContainerAllocator.class);
private static final Logger LOG =
LoggerFactory.getLogger(AbstractContainerAllocator.class);
FiCaSchedulerApp application;
AppSchedulingInfo appInfo;

View File

@ -23,8 +23,8 @@ import java.util.Iterator;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
@ -63,7 +63,8 @@ import org.apache.hadoop.yarn.util.resource.Resources;
* delayed scheduling mechanism to get better locality allocation.
*/
public class RegularContainerAllocator extends AbstractContainerAllocator {
private static final Log LOG = LogFactory.getLog(RegularContainerAllocator.class);
private static final Logger LOG =
LoggerFactory.getLogger(RegularContainerAllocator.class);
public RegularContainerAllocator(FiCaSchedulerApp application,
ResourceCalculator rc, RMContext rmContext,

View File

@ -26,8 +26,8 @@ import java.util.Map;
import com.google.common.annotations.VisibleForTesting;
import com.google.gson.GsonBuilder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
@ -45,7 +45,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
* configuration storage in FileSystem
*/
public class FSSchedulerConfigurationStore extends YarnConfigurationStore {
public static final Log LOG = LogFactory.getLog(
public static final Logger LOG = LoggerFactory.getLogger(
FSSchedulerConfigurationStore.class);
@VisibleForTesting

View File

@ -19,8 +19,8 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@ -62,8 +62,8 @@ import static org.fusesource.leveldbjni.JniDBFactory.bytes;
*/
public class LeveldbConfigurationStore extends YarnConfigurationStore {
public static final Log LOG =
LogFactory.getLog(LeveldbConfigurationStore.class);
public static final Logger LOG =
LoggerFactory.getLogger(LeveldbConfigurationStore.class);
private static final String DB_NAME = "yarn-conf-store";
private static final String LOG_KEY = "log";

View File

@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@ -49,8 +49,8 @@ import java.util.Map;
public class MutableCSConfigurationProvider implements CSConfigurationProvider,
MutableConfigurationProvider {
public static final Log LOG =
LogFactory.getLog(MutableCSConfigurationProvider.class);
public static final Logger LOG =
LoggerFactory.getLogger(MutableCSConfigurationProvider.class);
private Configuration schedConf;
private Configuration oldConf;

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.server.records.Version;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
@ -46,8 +46,8 @@ import java.util.Map;
*/
public abstract class YarnConfigurationStore {
public static final Log LOG =
LogFactory.getLog(YarnConfigurationStore.class);
public static final Logger LOG =
LoggerFactory.getLogger(YarnConfigurationStore.class);
/**
* LogMutation encapsulates the fields needed for configuration mutation
* audit logging and recovery.

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@ -29,7 +29,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
*/
public final class YarnConfigurationStoreFactory {
private static final Log LOG = LogFactory.getLog(
private static final Logger LOG = LoggerFactory.getLogger(
YarnConfigurationStoreFactory.class);
private YarnConfigurationStoreFactory() {

View File

@ -19,8 +19,8 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.curator.ZKCuratorManager;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@ -45,8 +45,8 @@ import java.util.Map;
*/
public class ZKConfigurationStore extends YarnConfigurationStore {
public static final Log LOG =
LogFactory.getLog(ZKConfigurationStore.class);
public static final Logger LOG =
LoggerFactory.getLogger(ZKConfigurationStore.class);
private long maxLogs;

View File

@ -27,8 +27,8 @@ import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@ -98,7 +98,8 @@ import com.google.common.annotations.VisibleForTesting;
@Private
@Unstable
public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
private static final Log LOG = LogFactory.getLog(FiCaSchedulerApp.class);
private static final Logger LOG =
LoggerFactory.getLogger(FiCaSchedulerApp.class);
private final Set<ContainerId> containersToPreempt =
new HashSet<ContainerId>();

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
@ -40,7 +40,8 @@ import java.util.Set;
public class FiCaSchedulerNode extends SchedulerNode {
private static final Log LOG = LogFactory.getLog(FiCaSchedulerNode.class);
private static final Logger LOG =
LoggerFactory.getLogger(FiCaSchedulerNode.class);
private Map<ContainerId, RMContainer> killableContainers = new HashMap<>();
private Resource totalKillableResources = Resource.newInstance(0, 0);

View File

@ -22,8 +22,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.service.AbstractService;
@ -37,8 +37,8 @@ import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
public abstract class PlacementConstraintManagerService extends AbstractService
implements PlacementConstraintManager {
protected static final Log LOG =
LogFactory.getLog(PlacementConstraintManagerService.class);
protected static final Logger LOG =
LoggerFactory.getLogger(PlacementConstraintManagerService.class);
private PlacementConstraintManager placementConstraintManager = null;

View File

@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
import java.util.Iterator;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.*;
@ -48,8 +48,8 @@ import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE_PART
@Public
@Unstable
public final class PlacementConstraintsUtil {
private static final Log LOG =
LogFactory.getLog(PlacementConstraintsUtil.class);
private static final Logger LOG =
LoggerFactory.getLogger(PlacementConstraintsUtil.class);
// Suppresses default constructor, ensuring non-instantiability.
private PlacementConstraintsUtil() {

View File

@ -19,8 +19,8 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.distributed;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.ResourceOption;
import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
@ -47,7 +47,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
*/
public class NodeQueueLoadMonitor implements ClusterMonitor {
final static Log LOG = LogFactory.getLog(NodeQueueLoadMonitor.class);
final static Logger LOG = LoggerFactory.
getLogger(NodeQueueLoadMonitor.class);
/**
* The comparator used to specify the metric against which the load

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
@ -59,7 +59,7 @@ import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.alloc
@Unstable
public class AllocationFileLoaderService extends AbstractService {
public static final Log LOG = LogFactory.getLog(
public static final Logger LOG = LoggerFactory.getLogger(
AllocationFileLoaderService.class.getName());
/** Time to wait between checks of the allocation file */

View File

@ -28,8 +28,8 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@ -70,7 +70,8 @@ import org.apache.hadoop.yarn.util.resource.Resources;
public class FSAppAttempt extends SchedulerApplicationAttempt
implements Schedulable {
private static final Log LOG = LogFactory.getLog(FSAppAttempt.class);
private static final Logger LOG =
LoggerFactory.getLogger(FSAppAttempt.class);
private static final DefaultResourceCalculator RESOURCE_CALCULATOR
= new DefaultResourceCalculator();

View File

@ -31,8 +31,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.TreeSet;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.security.UserGroupInformation;
@ -54,7 +54,8 @@ import static org.apache.hadoop.yarn.util.resource.Resources.none;
@Private
@Unstable
public class FSLeafQueue extends FSQueue {
private static final Log LOG = LogFactory.getLog(FSLeafQueue.class.getName());
private static final Logger LOG = LoggerFactory.
getLogger(FSLeafQueue.class.getName());
private static final List<FSQueue> EMPTY_LIST = Collections.emptyList();
private FSContext context;

View File

@ -27,8 +27,8 @@ import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import com.google.common.collect.ImmutableList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.security.UserGroupInformation;
@ -44,7 +44,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicat
@Private
@Unstable
public class FSParentQueue extends FSQueue {
private static final Log LOG = LogFactory.getLog(
private static final Logger LOG = LoggerFactory.getLogger(
FSParentQueue.class.getName());
private final List<FSQueue> childQueues = new ArrayList<>();

View File

@ -17,8 +17,8 @@
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.Resource;
@ -40,7 +40,8 @@ import java.util.concurrent.locks.Lock;
* Thread that handles FairScheduler preemption.
*/
class FSPreemptionThread extends Thread {
private static final Log LOG = LogFactory.getLog(FSPreemptionThread.class);
private static final Logger LOG = LoggerFactory.
getLogger(FSPreemptionThread.class);
protected final FSContext context;
private final FairScheduler scheduler;
private final long warnTimeBeforeKill;

View File

@ -23,8 +23,8 @@ import java.util.Collection;
import java.util.List;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.ipc.Server;
@ -51,7 +51,7 @@ import com.google.common.annotations.VisibleForTesting;
@Private
@Unstable
public abstract class FSQueue implements Queue, Schedulable {
private static final Log LOG = LogFactory.getLog(
private static final Logger LOG = LoggerFactory.getLogger(
FSQueue.class.getName());
private Resource fairShare = Resources.createResource(0, 0);

Some files were not shown because too many files have changed in this diff Show More