YARN-9363. Replaced debug logging with SLF4J parameterized log message.

Contributed by Prabhu Joseph
This commit is contained in:
Eric Yang 2019-03-18 13:57:18 -04:00
parent 19b22c4385
commit 5f6e225166
42 changed files with 188 additions and 277 deletions

View File

@ -242,9 +242,7 @@ public <T extends FileSystem & Renewable> void removeRenewAction(
} catch (InterruptedException ie) {
LOG.error("Interrupted while canceling token for " + fs.getUri()
+ "filesystem");
if (LOG.isDebugEnabled()) {
LOG.debug("Exception in removeRenewAction: ", ie);
}
LOG.debug("Exception in removeRenewAction: ", ie);
}
}
}

View File

@ -212,11 +212,8 @@ public static synchronized void createConfigFileAndAddLocalResource(
log.info("Component instance conf dir already exists: " + compInstanceDir);
}
if (log.isDebugEnabled()) {
log.debug("Tokens substitution for component instance: " + instance
.getCompInstanceName() + System.lineSeparator()
+ tokensForSubstitution);
}
log.debug("Tokens substitution for component instance: {}{}{}", instance
.getCompInstanceName(), System.lineSeparator(), tokensForSubstitution);
for (ConfigFile originalFile : compLaunchContext.getConfiguration()
.getFiles()) {

View File

@ -287,9 +287,7 @@ private void writeDomain(ApplicationAttemptId appAttemptId,
Path domainLogPath =
new Path(attemptDirCache.getAppAttemptDir(appAttemptId),
DOMAIN_LOG_PREFIX + appAttemptId.toString());
if (LOG.isDebugEnabled()) {
LOG.debug("Writing domains for {} to {}", appAttemptId, domainLogPath);
}
LOG.debug("Writing domains for {} to {}", appAttemptId, domainLogPath);
this.logFDsCache.writeDomainLog(
fs, domainLogPath, objMapper, domain, isAppendSupported);
}

View File

@ -27,8 +27,8 @@
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.channel.unix.DomainSocketAddress;
import io.netty.util.concurrent.DefaultThreadFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.SocketAddress;
@ -39,7 +39,8 @@
*/
public final class CsiGrpcClient implements AutoCloseable {
private static final Log LOG = LogFactory.getLog(CsiGrpcClient.class);
private static final Logger LOG =
LoggerFactory.getLogger(CsiGrpcClient.class);
private final ManagedChannel channel;

View File

@ -25,7 +25,8 @@
import org.apache.hadoop.yarn.csi.utils.GrpcHelper;
import java.io.IOException;
import java.util.logging.Logger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A fake implementation of CSI driver.
@ -33,7 +34,7 @@
*/
public class FakeCsiDriver {
private static final Logger LOG = Logger
private static final Logger LOG = LoggerFactory
.getLogger(FakeCsiDriver.class.getName());
private Server server;

View File

@ -21,8 +21,8 @@
import java.util.LinkedHashSet;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.AuthenticationFilterInitializer;
import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilter;
@ -33,7 +33,8 @@
* Set of utility methods to be used across timeline reader and collector.
*/
public final class TimelineServerUtils {
private static final Log LOG = LogFactory.getLog(TimelineServerUtils.class);
private static final Logger LOG =
LoggerFactory.getLogger(TimelineServerUtils.class);
private TimelineServerUtils() {
}

View File

@ -36,8 +36,8 @@
import java.util.Map;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DelegateToFileSystem;
import org.apache.hadoop.fs.FileContext;
@ -68,8 +68,8 @@
*/
public class WindowsSecureContainerExecutor extends DefaultContainerExecutor {
private static final Log LOG = LogFactory
.getLog(WindowsSecureContainerExecutor.class);
private static final Logger LOG = LoggerFactory
.getLogger(WindowsSecureContainerExecutor.class);
public static final String LOCALIZER_PID_FORMAT = "STAR_LOCALIZER_%s";
@ -591,10 +591,7 @@ protected LocalWrapperScriptBuilder getLocalWrapperScriptBuilder(
@Override
protected void copyFile(Path src, Path dst, String owner) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("copyFile: %s -> %s owner:%s", src.toString(),
dst.toString(), owner));
}
LOG.debug("copyFile: {} -> {} owner:{}", src, dst, owner);
Native.Elevated.copy(src, dst, true);
Native.Elevated.chown(dst, owner, nodeManagerGroup);
}
@ -607,10 +604,7 @@ protected void createDir(Path dirPath, FsPermission perms,
// This is similar to how LCE creates dirs
//
perms = new FsPermission(DIR_PERM);
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("createDir: %s perm:%s owner:%s",
dirPath.toString(), perms.toString(), owner));
}
LOG.debug("createDir: {} perm:{} owner:{}", dirPath, perms, owner);
super.createDir(dirPath, perms, createParent, owner);
lfs.setOwner(dirPath, owner, nodeManagerGroup);
@ -619,10 +613,7 @@ protected void createDir(Path dirPath, FsPermission perms,
@Override
protected void setScriptExecutable(Path script, String owner)
throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("setScriptExecutable: %s owner:%s",
script.toString(), owner));
}
LOG.debug("setScriptExecutable: {} owner:{}", script, owner);
super.setScriptExecutable(script, owner);
Native.Elevated.chown(script, owner, nodeManagerGroup);
}
@ -630,10 +621,7 @@ protected void setScriptExecutable(Path script, String owner)
@Override
public Path localizeClasspathJar(Path jarPath, Path target, String owner)
throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("localizeClasspathJar: %s %s o:%s",
jarPath, target, owner));
}
LOG.debug("localizeClasspathJar: {} {} o:{}", jarPath, target, owner);
createDir(target, new FsPermission(DIR_PERM), true, owner);
String fileName = jarPath.getName();
Path dst = new Path(target, fileName);
@ -669,9 +657,7 @@ public void startLocalizer(LocalizerStartContext ctx) throws IOException,
copyFile(nmPrivateContainerTokensPath, tokenDst, user);
File cwdApp = new File(appStorageDir.toString());
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("cwdApp: %s", cwdApp));
}
LOG.debug("cwdApp: {}", cwdApp);
List<String> command ;

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.ContainerId;
@ -42,7 +42,7 @@
*/
public class RecoverPausedContainerLaunch extends ContainerLaunch {
private static final Log LOG = LogFactory.getLog(
private static final Logger LOG = LoggerFactory.getLogger(
RecoverPausedContainerLaunch.class);
public RecoverPausedContainerLaunch(Context context,

View File

@ -19,8 +19,8 @@
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.yarn.api.ApplicationConstants;
@ -56,8 +56,8 @@
* a container to kill. The algorithm that picks the container is a plugin.
*/
public class CGroupElasticMemoryController extends Thread {
protected static final Log LOG = LogFactory
.getLog(CGroupElasticMemoryController.class);
protected static final Logger LOG = LoggerFactory
.getLogger(CGroupElasticMemoryController.class);
private final Clock clock = new MonotonicClock();
private String yarnCGroupPath;
private String oomListenerPath;

View File

@ -561,11 +561,8 @@ public void updateCGroupParam(CGroupController controller, String cGroupId,
String cGroupParamPath = getPathForCGroupParam(controller, cGroupId, param);
PrintWriter pw = null;
if (LOG.isDebugEnabled()) {
LOG.debug(
String.format("updateCGroupParam for path: %s with value %s",
cGroupParamPath, value));
}
LOG.debug("updateCGroupParam for path: {} with value {}",
cGroupParamPath, value);
try {
File file = new File(cGroupParamPath);

View File

@ -19,8 +19,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.util.CpuTimeTracker;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.SysInfoLinux;
@ -63,8 +63,8 @@ enum Result {
Continue,
Exit
}
protected static final Log LOG = LogFactory
.getLog(CGroupsResourceCalculator.class);
protected static final Logger LOG = LoggerFactory
.getLogger(CGroupsResourceCalculator.class);
private static final String PROCFS = "/proc";
static final String CGROUP = "cgroup";
static final String CPU_STAT = "cpuacct.stat";
@ -145,9 +145,7 @@ public void initialize() throws YarnException {
@Override
public float getCpuUsagePercent() {
if (LOG.isDebugEnabled()) {
LOG.debug("Process " + pid + " jiffies:" + processTotalJiffies);
}
LOG.debug("Process {} jiffies:{}", pid, processTotalJiffies);
return cpuTimeTracker.getCpuTrackerUsagePercent();
}
@ -187,9 +185,9 @@ public void updateProcessTree() {
processPhysicalMemory = getMemorySize(memStat);
if (memswStat.exists()) {
processVirtualMemory = getMemorySize(memswStat);
} else if(LOG.isDebugEnabled()) {
LOG.debug("Swap cgroups monitoring is not compiled into the kernel " +
memswStat.getAbsolutePath().toString());
} else {
LOG.debug("Swap cgroups monitoring is not compiled into the kernel {}",
memswStat.getAbsolutePath());
}
}

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree;
import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;
@ -29,8 +29,8 @@
* it is backward compatible with procfs in terms of virtual memory usage.
*/
public class CombinedResourceCalculator extends ResourceCalculatorProcessTree {
protected static final Log LOG = LogFactory
.getLog(CombinedResourceCalculator.class);
protected static final Logger LOG = LoggerFactory
.getLogger(CombinedResourceCalculator.class);
private ProcfsBasedProcessTree procfs;
private CGroupsResourceCalculator cgroup;

View File

@ -19,8 +19,8 @@
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.yarn.api.records.ExecutionType;
@ -46,8 +46,8 @@
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class DefaultOOMHandler implements Runnable {
protected static final Log LOG = LogFactory
.getLog(DefaultOOMHandler.class);
protected static final Logger LOG = LoggerFactory
.getLogger(DefaultOOMHandler.class);
private final Context context;
private final String memoryStatFile;
private final CGroupsHandler cgroups;

View File

@ -20,8 +20,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@ -32,7 +32,7 @@
*
*/
public final class NetworkTagMappingManagerFactory {
private static final Log LOG = LogFactory.getLog(
private static final Logger LOG = LoggerFactory.getLogger(
NetworkTagMappingManagerFactory.class);
private NetworkTagMappingManagerFactory() {}

View File

@ -22,8 +22,8 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.nodemanager.Context;
@ -44,7 +44,8 @@
* */
public class FpgaResourceAllocator {
static final Log LOG = LogFactory.getLog(FpgaResourceAllocator.class);
static final Logger LOG = LoggerFactory.
getLogger(FpgaResourceAllocator.class);
private List<FpgaDevice> allowedFpgas = new LinkedList<>();

View File

@ -20,8 +20,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.fpga;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@ -50,7 +50,8 @@
@InterfaceAudience.Private
public class FpgaResourceHandlerImpl implements ResourceHandler {
static final Log LOG = LogFactory.getLog(FpgaResourceHandlerImpl.class);
static final Logger LOG = LoggerFactory.
getLogger(FpgaResourceHandlerImpl.class);
private final String REQUEST_FPGA_IP_ID_KEY = "REQUESTED_FPGA_IP_ID";

View File

@ -21,8 +21,8 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;
@ -52,7 +52,8 @@
* Allocate GPU resources according to requirements
*/
public class GpuResourceAllocator {
final static Log LOG = LogFactory.getLog(GpuResourceAllocator.class);
final static Logger LOG = LoggerFactory.
getLogger(GpuResourceAllocator.class);
private static final int WAIT_MS_PER_LOOP = 1000;
private Set<GpuDevice> allowedGpuDevices = new TreeSet<>();

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.gpu;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ContainerId;
@ -41,8 +41,8 @@
import java.util.List;
public class GpuResourceHandlerImpl implements ResourceHandler {
final static Log LOG = LogFactory
.getLog(GpuResourceHandlerImpl.class);
final static Logger LOG = LoggerFactory
.getLogger(GpuResourceHandlerImpl.class);
// This will be used by container-executor to add necessary clis
public static final String EXCLUDED_GPUS_CLI_OPTION = "--excluded_gpus";

View File

@ -20,8 +20,8 @@
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;
@ -36,7 +36,8 @@ public class NumaNodeResource {
private long usedMemory;
private int usedCpus;
private static final Log LOG = LogFactory.getLog(NumaNodeResource.class);
private static final Logger LOG = LoggerFactory.
getLogger(NumaNodeResource.class);
private Map<ContainerId, Long> containerVsMemUsage =
new ConcurrentHashMap<>();

View File

@ -29,8 +29,8 @@
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.util.StringUtils;
@ -51,7 +51,8 @@
*/
public class NumaResourceAllocator {
private static final Log LOG = LogFactory.getLog(NumaResourceAllocator.class);
private static final Logger LOG = LoggerFactory.
getLogger(NumaResourceAllocator.class);
// Regex to find node ids, Ex: 'available: 2 nodes (0-1)'
private static final String NUMA_NODEIDS_REGEX =

View File

@ -20,8 +20,8 @@
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@ -39,8 +39,8 @@
*/
public class NumaResourceHandlerImpl implements ResourceHandler {
private static final Log LOG = LogFactory
.getLog(NumaResourceHandlerImpl.class);
private static final Logger LOG = LoggerFactory
.getLogger(NumaResourceHandlerImpl.class);
private final NumaResourceAllocator numaResourceAllocator;
private final String numaCtlCmd;

View File

@ -22,8 +22,8 @@
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;
@ -52,7 +52,8 @@
* scheduler.
* */
public class DeviceMappingManager {
static final Log LOG = LogFactory.getLog(DeviceMappingManager.class);
static final Logger LOG = LoggerFactory.
getLogger(DeviceMappingManager.class);
private Context nmContext;
private static final int WAIT_MS_PER_LOOP = 1000;
@ -163,10 +164,7 @@ private synchronized DeviceAllocation internalAssignDevices(
ContainerId containerId = container.getContainerId();
int requestedDeviceCount = getRequestedDeviceCount(resourceName,
requestedResource);
if (LOG.isDebugEnabled()) {
LOG.debug("Try allocating " + requestedDeviceCount
+ " " + resourceName);
}
LOG.debug("Try allocating {} {}", requestedDeviceCount, resourceName);
// Assign devices to container if requested some.
if (requestedDeviceCount > 0) {
if (requestedDeviceCount > getAvailableDevices(resourceName)) {
@ -266,10 +264,8 @@ public synchronized void cleanupAssignedDevices(String resourceName,
while (iter.hasNext()) {
entry = iter.next();
if (entry.getValue().equals(containerId)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Recycle devices: " + entry.getKey()
+ ", type: " + resourceName + " from " + containerId);
}
LOG.debug("Recycle devices: {}, type: {} from {}", entry.getKey(),
resourceName, containerId);
iter.remove();
}
}
@ -317,10 +313,8 @@ private void pickAndDoSchedule(Set<Device> allowed,
ContainerId containerId = c.getContainerId();
Map<String, String> env = c.getLaunchContext().getEnvironment();
if (null == dps) {
if (LOG.isDebugEnabled()) {
LOG.debug("Customized device plugin scheduler is preferred "
+ "but not implemented, use default logic");
}
LOG.debug("Customized device plugin scheduler is preferred "
+ "but not implemented, use default logic");
defaultScheduleAction(allowed, used,
assigned, containerId, count);
} else {

View File

@ -19,8 +19,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.nodemanager.Context;
@ -47,7 +47,8 @@
*
* */
public class DevicePluginAdapter implements ResourcePlugin {
private final static Log LOG = LogFactory.getLog(DevicePluginAdapter.class);
private final static Logger LOG = LoggerFactory.
getLogger(DevicePluginAdapter.class);
private final String resourceName;

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.Device;
import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.DevicePlugin;
@ -47,7 +47,7 @@
public class DeviceResourceDockerRuntimePluginImpl
implements DockerCommandPlugin {
final static Log LOG = LogFactory.getLog(
final static Logger LOG = LoggerFactory.getLogger(
DeviceResourceDockerRuntimePluginImpl.class);
private String resourceName;
@ -73,9 +73,7 @@ public DeviceResourceDockerRuntimePluginImpl(String resourceName,
public void updateDockerRunCommand(DockerRunCommand dockerRunCommand,
Container container) throws ContainerExecutionException {
String containerId = container.getContainerId().toString();
if (LOG.isDebugEnabled()) {
LOG.debug("Try to update docker run command for: " + containerId);
}
LOG.debug("Try to update docker run command for: {}", containerId);
if(!requestedDevice(resourceName, container)) {
return;
}
@ -89,17 +87,12 @@ public void updateDockerRunCommand(DockerRunCommand dockerRunCommand,
}
// handle runtime
dockerRunCommand.addRuntime(deviceRuntimeSpec.getContainerRuntime());
if (LOG.isDebugEnabled()) {
LOG.debug("Handle docker container runtime type: "
+ deviceRuntimeSpec.getContainerRuntime() + " for container: "
+ containerId);
}
LOG.debug("Handle docker container runtime type: {} for container: {}",
deviceRuntimeSpec.getContainerRuntime(), containerId);
// handle device mounts
Set<MountDeviceSpec> deviceMounts = deviceRuntimeSpec.getDeviceMounts();
if (LOG.isDebugEnabled()) {
LOG.debug("Handle device mounts: " + deviceMounts + " for container: "
+ containerId);
}
LOG.debug("Handle device mounts: {} for container: {}", deviceMounts,
containerId);
for (MountDeviceSpec mountDeviceSpec : deviceMounts) {
dockerRunCommand.addDevice(
mountDeviceSpec.getDevicePathInHost(),
@ -107,10 +100,8 @@ public void updateDockerRunCommand(DockerRunCommand dockerRunCommand,
}
// handle volume mounts
Set<MountVolumeSpec> mountVolumeSpecs = deviceRuntimeSpec.getVolumeMounts();
if (LOG.isDebugEnabled()) {
LOG.debug("Handle volume mounts: " + mountVolumeSpecs + " for container: "
+ containerId);
}
LOG.debug("Handle volume mounts: {} for container: {}", mountVolumeSpecs,
containerId);
for (MountVolumeSpec mountVolumeSpec : mountVolumeSpecs) {
if (mountVolumeSpec.getReadOnly()) {
dockerRunCommand.addReadOnlyMountLocation(
@ -124,10 +115,8 @@ public void updateDockerRunCommand(DockerRunCommand dockerRunCommand,
}
// handle envs
dockerRunCommand.addEnv(deviceRuntimeSpec.getEnvs());
if (LOG.isDebugEnabled()) {
LOG.debug("Handle envs: " + deviceRuntimeSpec.getEnvs()
+ " for container: " + containerId);
}
LOG.debug("Handle envs: {} for container: {}",
deviceRuntimeSpec.getEnvs(), containerId);
}
@Override
@ -147,10 +136,8 @@ public DockerVolumeCommand getCreateDockerVolumeCommand(Container container)
DockerVolumeCommand.VOLUME_CREATE_SUB_COMMAND);
command.setDriverName(volumeSec.getVolumeDriver());
command.setVolumeName(volumeSec.getVolumeName());
if (LOG.isDebugEnabled()) {
LOG.debug("Get volume create request from plugin:" + volumeClaims
+ " for container: " + container.getContainerId().toString());
}
LOG.debug("Get volume create request from plugin:{} for container: {}",
volumeClaims, container.getContainerId());
return command;
}
}
@ -195,10 +182,8 @@ private Set<Device> getAllocatedDevices(Container container) {
allocated = devicePluginAdapter
.getDeviceMappingManager()
.getAllocatedDevices(resourceName, containerId);
if (LOG.isDebugEnabled()) {
LOG.debug("Get allocation from deviceMappingManager: "
+ allocated + ", " + resourceName + " for container: " + containerId);
}
LOG.debug("Get allocation from deviceMappingManager: {}, {} for"
+ " container: {}", allocated, resourceName, containerId);
cachedAllocation.put(containerId, allocated);
return allocated;
}

View File

@ -19,8 +19,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ContainerId;
@ -53,7 +53,8 @@
* */
public class DeviceResourceHandlerImpl implements ResourceHandler {
static final Log LOG = LogFactory.getLog(DeviceResourceHandlerImpl.class);
static final Logger LOG = LoggerFactory.
getLogger(DeviceResourceHandlerImpl.class);
private final String resourceName;
private final DevicePlugin devicePlugin;
@ -134,10 +135,7 @@ public synchronized List<PrivilegedOperation> preStart(Container container)
String containerIdStr = container.getContainerId().toString();
DeviceMappingManager.DeviceAllocation allocation =
deviceMappingManager.assignDevices(resourceName, container);
if (LOG.isDebugEnabled()) {
LOG.debug("Allocated to "
+ containerIdStr + ": " + allocation);
}
LOG.debug("Allocated to {}: {}", containerIdStr, allocation);
DeviceRuntimeSpec spec;
try {
spec = devicePlugin.onDevicesAllocated(
@ -291,13 +289,9 @@ public DeviceType getDeviceType(Device device) {
}
DeviceType deviceType;
try {
if (LOG.isDebugEnabled()) {
LOG.debug("Try to get device type from device path: " + devName);
}
LOG.debug("Try to get device type from device path: {}", devName);
String output = shellWrapper.getDeviceFileType(devName);
if (LOG.isDebugEnabled()) {
LOG.debug("stat output:" + output);
}
LOG.debug("stat output:{}", output);
deviceType = output.startsWith("c") ? DeviceType.CHAR : DeviceType.BLOCK;
} catch (IOException e) {
String msg =

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.Device;
@ -33,7 +33,8 @@
* */
public class DeviceResourceUpdaterImpl extends NodeResourceUpdaterPlugin {
final static Log LOG = LogFactory.getLog(DeviceResourceUpdaterImpl.class);
final static Logger LOG = LoggerFactory.
getLogger(DeviceResourceUpdaterImpl.class);
private String resourceName;
private DevicePlugin devicePlugin;

View File

@ -19,8 +19,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.fpga;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@ -37,7 +37,8 @@
import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.NMResourceInfo;
public class FpgaResourcePlugin implements ResourcePlugin {
private static final Log LOG = LogFactory.getLog(FpgaResourcePlugin.class);
private static final Logger LOG = LoggerFactory.
getLogger(FpgaResourcePlugin.class);
private ResourceHandler fpgaResourceHandler = null;

View File

@ -20,8 +20,8 @@
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@ -50,7 +50,8 @@
* Implementation to use nvidia-docker v1 as GPU docker command plugin.
*/
public class NvidiaDockerV1CommandPlugin implements DockerCommandPlugin {
final static Log LOG = LogFactory.getLog(NvidiaDockerV1CommandPlugin.class);
final static Logger LOG = LoggerFactory.
getLogger(NvidiaDockerV1CommandPlugin.class);
private Configuration conf;
private Map<String, Set<String>> additionalCommands = null;
@ -121,9 +122,7 @@ private void init() throws ContainerExecutionException {
addToCommand(DEVICE_OPTION, getValue(str));
} else if (str.startsWith(VOLUME_DRIVER_OPTION)) {
volumeDriver = getValue(str);
if (LOG.isDebugEnabled()) {
LOG.debug("Found volume-driver:" + volumeDriver);
}
LOG.debug("Found volume-driver:{}", volumeDriver);
} else if (str.startsWith(MOUNT_RO_OPTION)) {
String mount = getValue(str);
if (!mount.endsWith(":ro")) {
@ -286,15 +285,11 @@ public DockerVolumeCommand getCreateDockerVolumeCommand(Container container)
if (VOLUME_NAME_PATTERN.matcher(mountSource).matches()) {
// This is a valid named volume
newVolumeName = mountSource;
if (LOG.isDebugEnabled()) {
LOG.debug("Found volume name for GPU:" + newVolumeName);
}
LOG.debug("Found volume name for GPU:{}", newVolumeName);
break;
} else{
if (LOG.isDebugEnabled()) {
LOG.debug("Failed to match " + mountSource
+ " to named-volume regex pattern");
}
LOG.debug("Failed to match {} to named-volume regex pattern",
mountSource);
}
}
}

View File

@ -19,8 +19,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ResourceMappings;
@ -41,7 +41,8 @@
* Implementation to use nvidia-docker v2 as GPU docker command plugin.
*/
public class NvidiaDockerV2CommandPlugin implements DockerCommandPlugin {
final static Log LOG = LogFactory.getLog(NvidiaDockerV2CommandPlugin.class);
final static Logger LOG = LoggerFactory.
getLogger(NvidiaDockerV2CommandPlugin.class);
private String nvidiaRuntime = "nvidia";
private String nvidiaVisibleDevices = "NVIDIA_VISIBLE_DEVICES";

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.nodemanager;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@ -33,8 +33,8 @@
* Class for testing {@link NodeManagerMXBean} implementation.
*/
public class TestNodeManagerMXBean {
public static final Log LOG = LogFactory.getLog(
TestNodeManagerMXBean.class);
public static final Logger LOG = LoggerFactory.getLogger(
TestNodeManagerMXBean.class);
@Test
public void testNodeManagerMXBean() throws Exception {

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@ -43,8 +43,8 @@
* Test for elastic non-strict memory controller based on cgroups.
*/
public class TestCGroupElasticMemoryController {
protected static final Log LOG = LogFactory
.getLog(TestCGroupElasticMemoryController.class);
protected static final Logger LOG = LoggerFactory
.getLogger(TestCGroupElasticMemoryController.class);
private YarnConfiguration conf = new YarnConfiguration();
private File script = new File("target/" +
TestCGroupElasticMemoryController.class.getName());

View File

@ -92,10 +92,8 @@ public void setEntitlement(String nodeLabel, QueueEntitlement entitlement)
// note: we currently set maxCapacity to capacity
// this might be revised later
setMaxCapacity(nodeLabel, entitlement.getMaxCapacity());
if (LOG.isDebugEnabled()) {
LOG.debug("successfully changed to {} for queue {}", capacity, this
LOG.debug("successfully changed to {} for queue {}", capacity, this
.getQueueName());
}
//update queue used capacity etc
CSQueueUtils.updateQueueStatistics(resourceCalculator,

View File

@ -1053,10 +1053,8 @@ private void addApplicationAttempt(
+ " to scheduler from user " + application.getUser() + " in queue "
+ queue.getQueueName());
if (isAttemptRecovering) {
if (LOG.isDebugEnabled()) {
LOG.debug(applicationAttemptId
+ " is recovering. Skipping notifying ATTEMPT_ADDED");
}
LOG.debug("{} is recovering. Skipping notifying ATTEMPT_ADDED",
applicationAttemptId);
} else{
rmContext.getDispatcher().getEventHandler().handle(
new RMAppAttemptEvent(applicationAttemptId,

View File

@ -593,10 +593,8 @@ public CSAssignment assignContainers(Resource clusterResource,
NodeType.NODE_LOCAL);
while (canAssign(clusterResource, node)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Trying to assign containers to child-queue of "
+ getQueueName());
}
LOG.debug("Trying to assign containers to child-queue of {}",
getQueueName());
// Are we over maximum-capacity for this queue?
// This will also consider parent's limits and also continuous reservation
@ -781,10 +779,8 @@ private CSAssignment assignContainersToChildQueues(Resource cluster,
for (Iterator<CSQueue> iter = sortAndGetChildrenAllocationIterator(
candidates.getPartition()); iter.hasNext(); ) {
CSQueue childQueue = iter.next();
if(LOG.isDebugEnabled()) {
LOG.debug("Trying to assign to queue: " + childQueue.getQueuePath()
+ " stats: " + childQueue);
}
LOG.debug("Trying to assign to queue: {} stats: {}",
childQueue.getQueuePath(), childQueue);
// Get ResourceLimits of child queue before assign containers
ResourceLimits childLimits =

View File

@ -221,15 +221,10 @@ List<QueueManagementChange> manageAutoCreatedLeafQueues()
+ parentQueue.getQueueName(), e);
}
} else{
if (LOG.isDebugEnabled()) {
LOG.debug(
"Skipping queue management updates for parent queue "
+ parentQueue
.getQueuePath() + " "
+ "since configuration for auto creating queues beyond "
+ "parent's "
+ "guaranteed capacity is disabled");
}
LOG.debug("Skipping queue management updates for parent queue {} "
+ "since configuration for auto creating queues beyond "
+ "parent's guaranteed capacity is disabled",
parentQueue.getQueuePath());
}
return queueManagementChanges;
}

View File

@ -669,19 +669,15 @@ public void commitQueueManagementChanges(
if (updatedQueueTemplate.getQueueCapacities().
getCapacity(nodeLabel) > 0) {
if (isActive(leafQueue, nodeLabel)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Queue is already active." + " Skipping activation : "
+ leafQueue.getQueueName());
}
LOG.debug("Queue is already active. Skipping activation : {}",
leafQueue.getQueueName());
} else{
activate(leafQueue, nodeLabel);
}
} else{
if (!isActive(leafQueue, nodeLabel)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Queue is already de-activated. Skipping "
+ "de-activation : " + leafQueue.getQueueName());
}
LOG.debug("Queue is already de-activated. Skipping "
+ "de-activation : {}", leafQueue.getQueueName());
} else{
deactivate(leafQueue, nodeLabel);
}

View File

@ -154,18 +154,13 @@ private static boolean getNodeConstraintEvaluatedResult(
if (schedulerNode.getNodeAttributes() == null ||
!schedulerNode.getNodeAttributes().contains(requestAttribute)) {
if (opCode == NodeAttributeOpCode.NE) {
if (LOG.isDebugEnabled()) {
LOG.debug("Incoming requestAttribute:" + requestAttribute
+ "is not present in " + schedulerNode.getNodeID()
+ ", however opcode is NE. Hence accept this node.");
}
LOG.debug("Incoming requestAttribute:{} is not present in {},"
+ " however opcode is NE. Hence accept this node.",
requestAttribute, schedulerNode.getNodeID());
return true;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Incoming requestAttribute:" + requestAttribute
+ "is not present in " + schedulerNode.getNodeID()
+ ", skip such node.");
}
LOG.debug("Incoming requestAttribute:{} is not present in {},"
+ " skip such node.", requestAttribute, schedulerNode.getNodeID());
return false;
}
@ -183,21 +178,16 @@ private static boolean getNodeConstraintEvaluatedResult(
}
if (requestAttribute.equals(nodeAttribute)) {
if (isOpCodeMatches(requestAttribute, nodeAttribute, opCode)) {
if (LOG.isDebugEnabled()) {
LOG.debug(
"Incoming requestAttribute:" + requestAttribute
+ " matches with node:" + schedulerNode.getNodeID());
}
LOG.debug("Incoming requestAttribute:{} matches with node:{}",
requestAttribute, schedulerNode.getNodeID());
found = true;
return found;
}
}
}
if (!found) {
if (LOG.isDebugEnabled()) {
LOG.info("skip this node:" + schedulerNode.getNodeID()
+ " for requestAttribute:" + requestAttribute);
}
LOG.debug("skip this node:{} for requestAttribute:{}",
schedulerNode.getNodeID(), requestAttribute);
return false;
}
return true;

View File

@ -426,10 +426,8 @@ public abstract void collectSchedulerApplications(
*/
boolean assignContainerPreCheck(FSSchedulerNode node) {
if (node.getReservedContainer() != null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Assigning container failed on node '" + node.getNodeName()
+ " because it has reserved containers.");
}
LOG.debug("Assigning container failed on node '{}' because it has"
+ " reserved containers.", node.getNodeName());
return false;
} else if (!Resources.fitsIn(getResourceUsage(), getMaxShare())) {
if (LOG.isDebugEnabled()) {

View File

@ -397,9 +397,8 @@ public synchronized void addApplication(ApplicationId applicationId,
LOG.info("Accepted application " + applicationId + " from user: " + user
+ ", currently num of applications: " + applications.size());
if (isAppRecovering) {
if (LOG.isDebugEnabled()) {
LOG.debug(applicationId + " is recovering. Skip notifying APP_ACCEPTED");
}
LOG.debug("{} is recovering. Skip notifying APP_ACCEPTED",
applicationId);
} else {
rmContext.getDispatcher().getEventHandler()
.handle(new RMAppEvent(applicationId, RMAppEventType.APP_ACCEPTED));
@ -429,10 +428,8 @@ public synchronized void addApplication(ApplicationId applicationId,
LOG.info("Added Application Attempt " + appAttemptId
+ " to scheduler from user " + application.getUser());
if (isAttemptRecovering) {
if (LOG.isDebugEnabled()) {
LOG.debug(appAttemptId
+ " is recovering. Skipping notifying ATTEMPT_ADDED");
}
LOG.debug("{} is recovering. Skipping notifying ATTEMPT_ADDED",
appAttemptId);
} else {
rmContext.getDispatcher().getEventHandler().handle(
new RMAppAttemptEvent(appAttemptId,

View File

@ -396,10 +396,8 @@ public boolean precheckNode(SchedulerNode schedulerNode,
SchedulingMode schedulingMode) {
// We will only look at node label = nodeLabelToLookAt according to
// schedulingMode and partition of node.
if(LOG.isDebugEnabled()) {
LOG.debug("precheckNode is invoked for " + schedulerNode.getNodeID() + ","
+ schedulingMode);
}
LOG.debug("precheckNode is invoked for {},{}", schedulerNode.getNodeID(),
schedulingMode);
String nodePartitionToLookAt;
if (schedulingMode == SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) {
nodePartitionToLookAt = schedulerNode.getPartition();

View File

@ -223,10 +223,8 @@ public synchronized void addTask(Task task) {
if (requests == null) {
requests = new HashMap<String, ResourceRequest>();
this.requests.put(schedulerKey, requests);
if(LOG.isDebugEnabled()) {
LOG.debug("Added priority=" + schedulerKey.getPriority()
+ " application="+ applicationId);
}
LOG.debug("Added priority={} application={}", schedulerKey.getPriority(),
applicationId);
}
final Resource capability = requestSpec.get(schedulerKey);
@ -242,10 +240,7 @@ public synchronized void addTask(Task task) {
LOG.info("Added task " + task.getTaskId() + " to application " +
applicationId + " at priority " + schedulerKey.getPriority());
if(LOG.isDebugEnabled()) {
LOG.debug("addTask: application=" + applicationId
+ " #asks=" + ask.size());
}
LOG.debug("addTask: application={} #asks={}", applicationId, ask.size());
// Create resource requests
for (String host : task.getHosts()) {
@ -320,12 +315,12 @@ private synchronized void addResourceRequest(
public synchronized List<Container> getResources() throws IOException {
if(LOG.isDebugEnabled()) {
LOG.debug("getResources begin:" + " application=" + applicationId
+ " #ask=" + ask.size());
LOG.debug("getResources begin: application={} #ask={}",
applicationId, ask.size());
for (ResourceRequest request : ask) {
LOG.debug("getResources:" + " application=" + applicationId
+ " ask-request=" + request);
LOG.debug("getResources: application={} ask-request={}",
applicationId, request);
}
}
@ -346,8 +341,8 @@ public synchronized List<Container> getResources() throws IOException {
ask.clear();
if(LOG.isDebugEnabled()) {
LOG.debug("getResources() for " + applicationId + ":"
+ " ask=" + ask.size() + " received=" + containers.size());
LOG.debug("getResources() for {}: ask={} received={}",
applicationId, ask.size(), containers.size());
}
return containers;
@ -451,10 +446,8 @@ private void updateResourceRequests(Map<String, ResourceRequest> requests,
updateResourceRequest(requests.get(ResourceRequest.ANY));
if(LOG.isDebugEnabled()) {
LOG.debug("updateResourceDemands:" + " application=" + applicationId
+ " #asks=" + ask.size());
}
LOG.debug("updateResourceDemands: application={} #asks={}",
applicationId, ask.size());
}
private void updateResourceRequest(ResourceRequest request) {

View File

@ -107,9 +107,7 @@ class FlowScanner implements RegionScanner, Closeable {
YarnConfiguration.APP_FINAL_VALUE_RETENTION_THRESHOLD,
YarnConfiguration.DEFAULT_APP_FINAL_VALUE_RETENTION_THRESHOLD);
}
if (LOG.isDebugEnabled()) {
LOG.debug(" batch size=" + batchSize);
}
LOG.debug(" batch size={}", batchSize);
}