YARN-9363. Replaced debug logging with SLF4J parameterized log message.

Contributed by Prabhu Joseph
Eric Yang 2019-03-18 13:57:18 -04:00
parent 19b22c4385
commit 5f6e225166
42 changed files with 188 additions and 277 deletions
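
Every hunk below applies the same mechanical change: a LOG.isDebugEnabled() guard wrapping a string-concatenating LOG.debug(...) call is collapsed into a single SLF4J parameterized call, and commons-logging Log/LogFactory declarations become SLF4J Logger/LoggerFactory. A minimal sketch of the pattern (the class and arguments here are hypothetical, not taken from the diff):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class PatternSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(PatternSketch.class);

  void oldStyle(String path, long value) {
    // Before: an explicit guard avoids building the message string
    // when DEBUG is disabled.
    if (LOG.isDebugEnabled()) {
      LOG.debug("update for path: " + path + " with value " + value);
    }
  }

  void newStyle(String path, long value) {
    // After: SLF4J substitutes the {} placeholders only when DEBUG is
    // enabled, so the guard is redundant for cheap arguments.
    LOG.debug("update for path: {} with value {}", path, value);
  }
}

Guards remain worthwhile when an argument is expensive to compute or when several debug statements run in a loop, which is presumably why the Application.java hunks near the end keep their isDebugEnabled() blocks.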


@@ -242,9 +242,7 @@ public class DelegationTokenRenewer
       } catch (InterruptedException ie) {
         LOG.error("Interrupted while canceling token for " + fs.getUri()
             + "filesystem");
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Exception in removeRenewAction: ", ie);
-        }
+        LOG.debug("Exception in removeRenewAction: {}", ie);
       }
     }
   }


@@ -212,11 +212,8 @@ public class ProviderUtils implements YarnServiceConstants {
       log.info("Component instance conf dir already exists: " + compInstanceDir);
     }
 
-    if (log.isDebugEnabled()) {
-      log.debug("Tokens substitution for component instance: " + instance
-          .getCompInstanceName() + System.lineSeparator()
-          + tokensForSubstitution);
-    }
+    log.debug("Tokens substitution for component instance: {}{}{}" + instance
+        .getCompInstanceName(), System.lineSeparator(), tokensForSubstitution);
 
     for (ConfigFile originalFile : compLaunchContext.getConfiguration()
         .getFiles()) {


@@ -287,9 +287,7 @@ public class FileSystemTimelineWriter extends TimelineWriter{
     Path domainLogPath =
         new Path(attemptDirCache.getAppAttemptDir(appAttemptId),
             DOMAIN_LOG_PREFIX + appAttemptId.toString());
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Writing domains for {} to {}", appAttemptId, domainLogPath);
-    }
+    LOG.debug("Writing domains for {} to {}", appAttemptId, domainLogPath);
     this.logFDsCache.writeDomainLog(
         fs, domainLogPath, objMapper, domain, isAppendSupported);
   }


@@ -27,8 +27,8 @@ import io.netty.channel.epoll.EpollDomainSocketChannel;
 import io.netty.channel.epoll.EpollEventLoopGroup;
 import io.netty.channel.unix.DomainSocketAddress;
 import io.netty.util.concurrent.DefaultThreadFactory;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.net.SocketAddress;
@@ -39,7 +39,8 @@ import java.util.concurrent.TimeUnit;
  */
 public final class CsiGrpcClient implements AutoCloseable {
-  private static final Log LOG = LogFactory.getLog(CsiGrpcClient.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(CsiGrpcClient.class);
 
   private final ManagedChannel channel;


@@ -25,7 +25,8 @@ import io.netty.channel.epoll.EpollServerDomainSocketChannel;
 import org.apache.hadoop.yarn.csi.utils.GrpcHelper;
 
 import java.io.IOException;
-import java.util.logging.Logger;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A fake implementation of CSI driver.
@@ -33,7 +34,7 @@ import java.util.logging.Logger;
  */
 public class FakeCsiDriver {
-  private static final Logger LOG = Logger
+  private static final Logger LOG = LoggerFactory
       .getLogger(FakeCsiDriver.class.getName());
 
   private Server server;


@@ -21,8 +21,8 @@ package org.apache.hadoop.yarn.server.util.timeline;
 import java.util.LinkedHashSet;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.AuthenticationFilterInitializer;
 import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilter;
@@ -33,7 +33,8 @@ import org.apache.hadoop.yarn.server.timeline.security.TimelineDelgationTokenSec
  * Set of utility methods to be used across timeline reader and collector.
  */
 public final class TimelineServerUtils {
-  private static final Log LOG = LogFactory.getLog(TimelineServerUtils.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TimelineServerUtils.class);
 
   private TimelineServerUtils() {
   }


@@ -36,8 +36,8 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DelegateToFileSystem;
 import org.apache.hadoop.fs.FileContext;
@@ -68,8 +68,8 @@ import org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext;
  */
 public class WindowsSecureContainerExecutor extends DefaultContainerExecutor {
 
-  private static final Log LOG = LogFactory
-      .getLog(WindowsSecureContainerExecutor.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(WindowsSecureContainerExecutor.class);
 
   public static final String LOCALIZER_PID_FORMAT = "STAR_LOCALIZER_%s";
@@ -591,10 +591,7 @@ public class WindowsSecureContainerExecutor extends DefaultContainerExecutor {
   @Override
   protected void copyFile(Path src, Path dst, String owner) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(String.format("copyFile: %s -> %s owner:%s", src.toString(),
-          dst.toString(), owner));
-    }
+    LOG.debug("copyFile: {} -> {} owner:{}", src, dst, owner);
     Native.Elevated.copy(src, dst, true);
     Native.Elevated.chown(dst, owner, nodeManagerGroup);
   }
@@ -607,10 +604,7 @@ public class WindowsSecureContainerExecutor extends DefaultContainerExecutor {
       // This is similar to how LCE creates dirs
       //
       perms = new FsPermission(DIR_PERM);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(String.format("createDir: %s perm:%s owner:%s",
-            dirPath.toString(), perms.toString(), owner));
-      }
+      LOG.debug("createDir: {} perm:{} owner:{}", dirPath, perms, owner);
       super.createDir(dirPath, perms, createParent, owner);
       lfs.setOwner(dirPath, owner, nodeManagerGroup);
@@ -619,10 +613,7 @@ public class WindowsSecureContainerExecutor extends DefaultContainerExecutor {
   @Override
   protected void setScriptExecutable(Path script, String owner)
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(String.format("setScriptExecutable: %s owner:%s",
-          script.toString(), owner));
-    }
+    LOG.debug("setScriptExecutable: {} owner:{}", script, owner);
     super.setScriptExecutable(script, owner);
     Native.Elevated.chown(script, owner, nodeManagerGroup);
   }
@@ -630,10 +621,7 @@ public class WindowsSecureContainerExecutor extends DefaultContainerExecutor {
   @Override
   public Path localizeClasspathJar(Path jarPath, Path target, String owner)
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(String.format("localizeClasspathJar: %s %s o:%s",
-          jarPath, target, owner));
-    }
+    LOG.debug("localizeClasspathJar: {} {} o:{}", jarPath, target, owner);
     createDir(target, new FsPermission(DIR_PERM), true, owner);
     String fileName = jarPath.getName();
     Path dst = new Path(target, fileName);
@@ -669,9 +657,7 @@ public class WindowsSecureContainerExecutor extends DefaultContainerExecutor {
       copyFile(nmPrivateContainerTokensPath, tokenDst, user);
 
       File cwdApp = new File(appStorageDir.toString());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(String.format("cwdApp: %s", cwdApp));
-      }
+      LOG.debug("cwdApp: {}", cwdApp);
 
       List<String> command ;


@@ -18,8 +18,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -42,7 +42,7 @@ import java.io.InterruptedIOException;
  */
 public class RecoverPausedContainerLaunch extends ContainerLaunch {
-  private static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       RecoveredContainerLaunch.class);
 
   public RecoverPausedContainerLaunch(Context context,


@@ -19,8 +19,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resourc
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.io.IOUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
@@ -56,8 +56,8 @@ import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
  * a container to kill. The algorithm that picks the container is a plugin.
  */
 public class CGroupElasticMemoryController extends Thread {
-  protected static final Log LOG = LogFactory
-      .getLog(CGroupElasticMemoryController.class);
+  protected static final Logger LOG = LoggerFactory
+      .getLogger(CGroupElasticMemoryController.class);
   private final Clock clock = new MonotonicClock();
   private String yarnCGroupPath;
   private String oomListenerPath;


@@ -561,11 +561,8 @@ class CGroupsHandlerImpl implements CGroupsHandler {
     String cGroupParamPath = getPathForCGroupParam(controller, cGroupId, param);
     PrintWriter pw = null;
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(
-          String.format("updateCGroupParam for path: %s with value %s",
-              cGroupParamPath, value));
-    }
+    LOG.debug("updateCGroupParam for path: {} with value {}",
+        cGroupParamPath, value);
     try {
       File file = new File(cGroupParamPath);


@@ -19,8 +19,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.util.CpuTimeTracker;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.SysInfoLinux;
@@ -63,8 +63,8 @@ public class CGroupsResourceCalculator extends ResourceCalculatorProcessTree {
     Continue,
     Exit
   }
-  protected static final Log LOG = LogFactory
-      .getLog(CGroupsResourceCalculator.class);
+  protected static final Logger LOG = LoggerFactory
+      .getLogger(CGroupsResourceCalculator.class);
   private static final String PROCFS = "/proc";
   static final String CGROUP = "cgroup";
   static final String CPU_STAT = "cpuacct.stat";
@@ -145,9 +145,7 @@ public class CGroupsResourceCalculator extends ResourceCalculatorProcessTree {
   @Override
   public float getCpuUsagePercent() {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Process " + pid + " jiffies:" + processTotalJiffies);
-    }
+    LOG.debug("Process {} jiffies:{}", pid, processTotalJiffies);
     return cpuTimeTracker.getCpuTrackerUsagePercent();
   }
@@ -187,9 +185,9 @@ public class CGroupsResourceCalculator extends ResourceCalculatorProcessTree {
       processPhysicalMemory = getMemorySize(memStat);
       if (memswStat.exists()) {
         processVirtualMemory = getMemorySize(memswStat);
-      } else if(LOG.isDebugEnabled()) {
-        LOG.debug("Swap cgroups monitoring is not compiled into the kernel " +
-            memswStat.getAbsolutePath().toString());
+      } else {
+        LOG.debug("Swap cgroups monitoring is not compiled into the kernel {}",
+            memswStat.getAbsolutePath());
       }
     }


@@ -18,8 +18,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree;
 import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;
@@ -29,8 +29,8 @@ import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;
 * it is backward compatible with procfs in terms of virtual memory usage.
 */
 public class CombinedResourceCalculator extends ResourceCalculatorProcessTree {
-  protected static final Log LOG = LogFactory
-      .getLog(CombinedResourceCalculator.class);
+  protected static final Logger LOG = LoggerFactory
+      .getLogger(CombinedResourceCalculator.class);
   private ProcfsBasedProcessTree procfs;
   private CGroupsResourceCalculator cgroup;


@@ -19,8 +19,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resourc
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.yarn.api.records.ExecutionType;
@@ -46,8 +46,8 @@ import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class DefaultOOMHandler implements Runnable {
-  protected static final Log LOG = LogFactory
-      .getLog(DefaultOOMHandler.class);
+  protected static final Logger LOG = LoggerFactory
+      .getLogger(DefaultOOMHandler.class);
   private final Context context;
   private final String memoryStatFile;
   private final CGroupsHandler cgroups;


@@ -20,8 +20,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -32,7 +32,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 *
 */
 public final class NetworkTagMappingManagerFactory {
-  private static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       NetworkTagMappingManagerFactory.class);
 
   private NetworkTagMappingManagerFactory() {}


@@ -22,8 +22,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resourc
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableList;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
@@ -44,7 +44,8 @@ import static org.apache.hadoop.yarn.api.records.ResourceInformation.FPGA_URI;
 * */
 public class FpgaResourceAllocator {
-  static final Log LOG = LogFactory.getLog(FpgaResourceAllocator.class);
+  static final Logger LOG = LoggerFactory.
+      getLogger(FpgaResourceAllocator.class);
 
   private List<FpgaDevice> allowedFpgas = new LinkedList<>();


@@ -20,8 +20,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.fpga;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -50,7 +50,8 @@ import static org.apache.hadoop.yarn.api.records.ResourceInformation.FPGA_URI;
 @InterfaceAudience.Private
 public class FpgaResourceHandlerImpl implements ResourceHandler {
-  static final Log LOG = LogFactory.getLog(FpgaResourceHandlerImpl.class);
+  static final Logger LOG = LoggerFactory.
+      getLogger(FpgaResourceHandlerImpl.class);
 
   private final String REQUEST_FPGA_IP_ID_KEY = "REQUESTED_FPGA_IP_ID";


@@ -21,8 +21,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resourc
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -52,7 +52,8 @@ import static org.apache.hadoop.yarn.api.records.ResourceInformation.GPU_URI;
 * Allocate GPU resources according to requirements
 */
 public class GpuResourceAllocator {
-  final static Log LOG = LogFactory.getLog(GpuResourceAllocator.class);
+  final static Logger LOG = LoggerFactory.
+      getLogger(GpuResourceAllocator.class);
 
   private static final int WAIT_MS_PER_LOOP = 1000;
   private Set<GpuDevice> allowedGpuDevices = new TreeSet<>();


@@ -18,8 +18,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.gpu;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -41,8 +41,8 @@ import java.util.Arrays;
 import java.util.List;
 
 public class GpuResourceHandlerImpl implements ResourceHandler {
-  final static Log LOG = LogFactory
-      .getLog(GpuResourceHandlerImpl.class);
+  final static Logger LOG = LoggerFactory
+      .getLogger(GpuResourceHandlerImpl.class);
 
   // This will be used by container-executor to add necessary clis
   public static final String EXCLUDED_GPUS_CLI_OPTION = "--excluded_gpus";


@@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resourc
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -36,7 +36,8 @@ public class NumaNodeResource {
   private long usedMemory;
   private int usedCpus;
 
-  private static final Log LOG = LogFactory.getLog(NumaNodeResource.class);
+  private static final Logger LOG = LoggerFactory.
+      getLogger(NumaNodeResource.class);
 
   private Map<ContainerId, Long> containerVsMemUsage =
       new ConcurrentHashMap<>();


@@ -29,8 +29,8 @@ import java.util.Map.Entry;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.apache.hadoop.util.StringUtils;
@@ -51,7 +51,8 @@ import com.google.common.annotations.VisibleForTesting;
 */
 public class NumaResourceAllocator {
-  private static final Log LOG = LogFactory.getLog(NumaResourceAllocator.class);
+  private static final Logger LOG = LoggerFactory.
+      getLogger(NumaResourceAllocator.class);
 
   // Regex to find node ids, Ex: 'available: 2 nodes (0-1)'
   private static final String NUMA_NODEIDS_REGEX =


@@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resourc
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -39,8 +39,8 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resource
 */
 public class NumaResourceHandlerImpl implements ResourceHandler {
-  private static final Log LOG = LogFactory
-      .getLog(NumaResourceHandlerImpl.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(NumaResourceHandlerImpl.class);
 
   private final NumaResourceAllocator numaResourceAllocator;
   private final String numaCtlCmd;


@@ -22,8 +22,8 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -52,7 +52,8 @@ import java.util.concurrent.ConcurrentHashMap;
 * scheduler.
 * */
 public class DeviceMappingManager {
-  static final Log LOG = LogFactory.getLog(DeviceMappingManager.class);
+  static final Logger LOG = LoggerFactory.
+      getLogger(DeviceMappingManager.class);
 
   private Context nmContext;
   private static final int WAIT_MS_PER_LOOP = 1000;
@@ -163,10 +164,7 @@ public class DeviceMappingManager {
     ContainerId containerId = container.getContainerId();
     int requestedDeviceCount = getRequestedDeviceCount(resourceName,
         requestedResource);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Try allocating " + requestedDeviceCount
-          + " " + resourceName);
-    }
+    LOG.debug("Try allocating {} {}", requestedDeviceCount, resourceName);
     // Assign devices to container if requested some.
     if (requestedDeviceCount > 0) {
       if (requestedDeviceCount > getAvailableDevices(resourceName)) {
@@ -266,10 +264,8 @@ public class DeviceMappingManager {
     while (iter.hasNext()) {
       entry = iter.next();
       if (entry.getValue().equals(containerId)) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Recycle devices: " + entry.getKey()
-              + ", type: " + resourceName + " from " + containerId);
-        }
+        LOG.debug("Recycle devices: {}, type: {} from {}", entry.getKey(),
+            resourceName, containerId);
         iter.remove();
       }
     }
@@ -317,10 +313,8 @@ public class DeviceMappingManager {
     ContainerId containerId = c.getContainerId();
     Map<String, String> env = c.getLaunchContext().getEnvironment();
     if (null == dps) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Customized device plugin scheduler is preferred "
-            + "but not implemented, use default logic");
-      }
+      LOG.debug("Customized device plugin scheduler is preferred "
+          + "but not implemented, use default logic");
       defaultScheduleAction(allowed, used,
           assigned, containerId, count);
     } else {


@@ -19,8 +19,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
@@ -47,7 +47,8 @@ import java.util.Map;
 *
 * */
 public class DevicePluginAdapter implements ResourcePlugin {
-  private final static Log LOG = LogFactory.getLog(DevicePluginAdapter.class);
+  private final static Logger LOG = LoggerFactory.
+      getLogger(DevicePluginAdapter.class);
 
   private final String resourceName;


@@ -18,8 +18,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.Device;
 import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.DevicePlugin;
@@ -47,7 +47,7 @@ import java.util.Set;
 public class DeviceResourceDockerRuntimePluginImpl
     implements DockerCommandPlugin {
-  final static Log LOG = LogFactory.getLog(
+  final static Logger LOG = LoggerFactory.getLogger(
       DeviceResourceDockerRuntimePluginImpl.class);
 
   private String resourceName;
@@ -73,9 +73,7 @@ public class DeviceResourceDockerRuntimePluginImpl
   public void updateDockerRunCommand(DockerRunCommand dockerRunCommand,
       Container container) throws ContainerExecutionException {
     String containerId = container.getContainerId().toString();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Try to update docker run command for: " + containerId);
-    }
+    LOG.debug("Try to update docker run command for: {}", containerId);
     if(!requestedDevice(resourceName, container)) {
       return;
     }
@@ -89,17 +87,12 @@ public class DeviceResourceDockerRuntimePluginImpl
     }
     // handle runtime
     dockerRunCommand.addRuntime(deviceRuntimeSpec.getContainerRuntime());
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Handle docker container runtime type: "
-          + deviceRuntimeSpec.getContainerRuntime() + " for container: "
-          + containerId);
-    }
+    LOG.debug("Handle docker container runtime type: {} for container: {}",
+        deviceRuntimeSpec.getContainerRuntime(), containerId);
     // handle device mounts
     Set<MountDeviceSpec> deviceMounts = deviceRuntimeSpec.getDeviceMounts();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Handle device mounts: " + deviceMounts + " for container: "
-          + containerId);
-    }
+    LOG.debug("Handle device mounts: {} for container: {}", deviceMounts,
+        containerId);
     for (MountDeviceSpec mountDeviceSpec : deviceMounts) {
       dockerRunCommand.addDevice(
           mountDeviceSpec.getDevicePathInHost(),
@@ -107,10 +100,8 @@ public class DeviceResourceDockerRuntimePluginImpl
     }
     // handle volume mounts
     Set<MountVolumeSpec> mountVolumeSpecs = deviceRuntimeSpec.getVolumeMounts();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Handle volume mounts: " + mountVolumeSpecs + " for container: "
-          + containerId);
-    }
+    LOG.debug("Handle volume mounts: {} for container: {}", mountVolumeSpecs,
+        containerId);
     for (MountVolumeSpec mountVolumeSpec : mountVolumeSpecs) {
       if (mountVolumeSpec.getReadOnly()) {
         dockerRunCommand.addReadOnlyMountLocation(
@@ -124,10 +115,8 @@ public class DeviceResourceDockerRuntimePluginImpl
     }
     // handle envs
     dockerRunCommand.addEnv(deviceRuntimeSpec.getEnvs());
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Handle envs: " + deviceRuntimeSpec.getEnvs()
-          + " for container: " + containerId);
-    }
+    LOG.debug("Handle envs: {} for container: {}",
+        deviceRuntimeSpec.getEnvs(), containerId);
   }
 
   @Override
@@ -147,10 +136,8 @@ public class DeviceResourceDockerRuntimePluginImpl
           DockerVolumeCommand.VOLUME_CREATE_SUB_COMMAND);
       command.setDriverName(volumeSec.getVolumeDriver());
       command.setVolumeName(volumeSec.getVolumeName());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Get volume create request from plugin:" + volumeClaims
-            + " for container: " + container.getContainerId().toString());
-      }
+      LOG.debug("Get volume create request from plugin:{} for container: {}",
+          volumeClaims, container.getContainerId());
       return command;
     }
   }
@@ -195,10 +182,8 @@ public class DeviceResourceDockerRuntimePluginImpl
     allocated = devicePluginAdapter
         .getDeviceMappingManager()
        .getAllocatedDevices(resourceName, containerId);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Get allocation from deviceMappingManager: "
-          + allocated + ", " + resourceName + " for container: " + containerId);
-    }
+    LOG.debug("Get allocation from deviceMappingManager: {}, {} for"
+        + " container: {}", allocated, resourceName, containerId);
     cachedAllocation.put(containerId, allocated);
     return allocated;
   }


@@ -19,8 +19,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -53,7 +53,8 @@ import java.util.Set;
 * */
 public class DeviceResourceHandlerImpl implements ResourceHandler {
-  static final Log LOG = LogFactory.getLog(DeviceResourceHandlerImpl.class);
+  static final Logger LOG = LoggerFactory.
+      getLogger(DeviceResourceHandlerImpl.class);
 
   private final String resourceName;
   private final DevicePlugin devicePlugin;
@@ -134,10 +135,7 @@ public class DeviceResourceHandlerImpl implements ResourceHandler {
     String containerIdStr = container.getContainerId().toString();
     DeviceMappingManager.DeviceAllocation allocation =
         deviceMappingManager.assignDevices(resourceName, container);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Allocated to "
-          + containerIdStr + ": " + allocation);
-    }
+    LOG.debug("Allocated to {}: {}", containerIdStr, allocation);
     DeviceRuntimeSpec spec;
     try {
       spec = devicePlugin.onDevicesAllocated(
@@ -291,13 +289,9 @@ public class DeviceResourceHandlerImpl implements ResourceHandler {
     }
     DeviceType deviceType;
     try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Try to get device type from device path: " + devName);
-      }
+      LOG.debug("Try to get device type from device path: {}", devName);
       String output = shellWrapper.getDeviceFileType(devName);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("stat output:" + output);
-      }
+      LOG.debug("stat output:{}", output);
       deviceType = output.startsWith("c") ? DeviceType.CHAR : DeviceType.BLOCK;
     } catch (IOException e) {
       String msg =


@@ -18,8 +18,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.Device;
@@ -33,7 +33,8 @@ import java.util.Set;
 * */
 public class DeviceResourceUpdaterImpl extends NodeResourceUpdaterPlugin {
-  final static Log LOG = LogFactory.getLog(DeviceResourceUpdaterImpl.class);
+  final static Logger LOG = LoggerFactory.
+      getLogger(DeviceResourceUpdaterImpl.class);
 
   private String resourceName;
   private DevicePlugin devicePlugin;


@@ -19,8 +19,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.fpga;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -37,7 +37,8 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin
 import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.NMResourceInfo;
 
 public class FpgaResourcePlugin implements ResourcePlugin {
-  private static final Log LOG = LogFactory.getLog(FpgaResourcePlugin.class);
+  private static final Logger LOG = LoggerFactory.
+      getLogger(FpgaResourcePlugin.class);
 
   private ResourceHandler fpgaResourceHandler = null;


@@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugi
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.io.IOUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -50,7 +50,8 @@ import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
 * Implementation to use nvidia-docker v1 as GPU docker command plugin.
 */
 public class NvidiaDockerV1CommandPlugin implements DockerCommandPlugin {
-  final static Log LOG = LogFactory.getLog(NvidiaDockerV1CommandPlugin.class);
+  final static Logger LOG = LoggerFactory.
+      getLogger(NvidiaDockerV1CommandPlugin.class);
 
   private Configuration conf;
   private Map<String, Set<String>> additionalCommands = null;
@@ -121,9 +122,7 @@ public class NvidiaDockerV1CommandPlugin implements DockerCommandPlugin {
         addToCommand(DEVICE_OPTION, getValue(str));
       } else if (str.startsWith(VOLUME_DRIVER_OPTION)) {
         volumeDriver = getValue(str);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Found volume-driver:" + volumeDriver);
-        }
+        LOG.debug("Found volume-driver:{}", volumeDriver);
       } else if (str.startsWith(MOUNT_RO_OPTION)) {
         String mount = getValue(str);
         if (!mount.endsWith(":ro")) {
@@ -286,15 +285,11 @@ public class NvidiaDockerV1CommandPlugin implements DockerCommandPlugin {
         if (VOLUME_NAME_PATTERN.matcher(mountSource).matches()) {
           // This is a valid named volume
           newVolumeName = mountSource;
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Found volume name for GPU:" + newVolumeName);
-          }
+          LOG.debug("Found volume name for GPU:{}", newVolumeName);
           break;
         } else{
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Failed to match " + mountSource
-                + " to named-volume regex pattern");
-          }
+          LOG.debug("Failed to match {} to named-volume regex pattern",
+              mountSource);
         }
       }
     }


@@ -19,8 +19,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ResourceMappings;
@@ -41,7 +41,8 @@ import java.util.Set;
 * Implementation to use nvidia-docker v2 as GPU docker command plugin.
 */
 public class NvidiaDockerV2CommandPlugin implements DockerCommandPlugin {
-  final static Log LOG = LogFactory.getLog(NvidiaDockerV2CommandPlugin.class);
+  final static Logger LOG = LoggerFactory.
+      getLogger(NvidiaDockerV2CommandPlugin.class);
 
   private String nvidiaRuntime = "nvidia";
   private String nvidiaVisibleDevices = "NVIDIA_VISIBLE_DEVICES";


@@ -18,8 +18,8 @@
 package org.apache.hadoop.yarn.server.nodemanager;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -33,8 +33,8 @@ import java.lang.management.ManagementFactory;
 * Class for testing {@link NodeManagerMXBean} implementation.
 */
 public class TestNodeManagerMXBean {
-  public static final Log LOG = LogFactory.getLog(
+  public static final Logger LOG = LoggerFactory.getLogger(
       TestNodeManagerMXBean.class);
 
   @Test
   public void testNodeManagerMXBean() throws Exception {


@@ -18,8 +18,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@@ -43,8 +43,8 @@ import static org.mockito.Mockito.when;
 * Test for elastic non-strict memory controller based on cgroups.
 */
 public class TestCGroupElasticMemoryController {
-  protected static final Log LOG = LogFactory
-      .getLog(TestCGroupElasticMemoryController.class);
+  protected static final Logger LOG = LoggerFactory
+      .getLogger(TestCGroupElasticMemoryController.class);
   private YarnConfiguration conf = new YarnConfiguration();
   private File script = new File("target/" +
       TestCGroupElasticMemoryController.class.getName());


@@ -92,10 +92,8 @@ public class AbstractAutoCreatedLeafQueue extends LeafQueue {
       // note: we currently set maxCapacity to capacity
       // this might be revised later
       setMaxCapacity(nodeLabel, entitlement.getMaxCapacity());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("successfully changed to {} for queue {}", capacity, this
-            .getQueueName());
-      }
+      LOG.debug("successfully changed to {} for queue {}", capacity, this
+          .getQueueName());
 
       //update queue used capacity etc
       CSQueueUtils.updateQueueStatistics(resourceCalculator,


@@ -1053,10 +1053,8 @@ public class CapacityScheduler extends
         + " to scheduler from user " + application.getUser() + " in queue "
         + queue.getQueueName());
     if (isAttemptRecovering) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(applicationAttemptId
-            + " is recovering. Skipping notifying ATTEMPT_ADDED");
-      }
+      LOG.debug("{} is recovering. Skipping notifying ATTEMPT_ADDED",
+          applicationAttemptId);
     } else{
       rmContext.getDispatcher().getEventHandler().handle(
           new RMAppAttemptEvent(applicationAttemptId,


@@ -593,10 +593,8 @@ public class ParentQueue extends AbstractCSQueue {
             NodeType.NODE_LOCAL);
 
     while (canAssign(clusterResource, node)) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Trying to assign containers to child-queue of "
-            + getQueueName());
-      }
+      LOG.debug("Trying to assign containers to child-queue of {}",
+          getQueueName());
 
       // Are we over maximum-capacity for this queue?
       // This will also consider parent's limits and also continuous reservation
@@ -781,10 +779,8 @@ public class ParentQueue extends AbstractCSQueue {
     for (Iterator<CSQueue> iter = sortAndGetChildrenAllocationIterator(
         candidates.getPartition()); iter.hasNext(); ) {
       CSQueue childQueue = iter.next();
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Trying to assign to queue: " + childQueue.getQueuePath()
-            + " stats: " + childQueue);
-      }
+      LOG.debug("Trying to assign to queue: {} stats: {}",
+          childQueue.getQueuePath(), childQueue);
 
       // Get ResourceLimits of child queue before assign containers
       ResourceLimits childLimits =


@@ -221,15 +221,10 @@ public class QueueManagementDynamicEditPolicy implements SchedulingEditPolicy {
             + parentQueue.getQueueName(), e);
       }
     } else{
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            "Skipping queue management updates for parent queue "
-                + parentQueue
-                .getQueuePath() + " "
-                + "since configuration for auto creating queues beyond "
-                + "parent's "
-                + "guaranteed capacity is disabled");
-      }
+      LOG.debug("Skipping queue management updates for parent queue {} "
+          + "since configuration for auto creating queues beyond "
+          + "parent's guaranteed capacity is disabled",
+          parentQueue.getQueuePath());
     }
     return queueManagementChanges;
   }


@@ -669,19 +669,15 @@
       if (updatedQueueTemplate.getQueueCapacities().
           getCapacity(nodeLabel) > 0) {
         if (isActive(leafQueue, nodeLabel)) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Queue is already active." + " Skipping activation : "
-                + leafQueue.getQueueName());
-          }
+          LOG.debug("Queue is already active. Skipping activation : {}",
+              leafQueue.getQueueName());
         } else{
           activate(leafQueue, nodeLabel);
         }
       } else{
         if (!isActive(leafQueue, nodeLabel)) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Queue is already de-activated. Skipping "
-                + "de-activation : " + leafQueue.getQueueName());
-          }
+          LOG.debug("Queue is already de-activated. Skipping "
+              + "de-activation : {}", leafQueue.getQueueName());
         } else{
           deactivate(leafQueue, nodeLabel);
         }


@@ -154,18 +154,13 @@ public final class PlacementConstraintsUtil {
     if (schedulerNode.getNodeAttributes() == null ||
         !schedulerNode.getNodeAttributes().contains(requestAttribute)) {
       if (opCode == NodeAttributeOpCode.NE) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Incoming requestAttribute:" + requestAttribute
-              + "is not present in " + schedulerNode.getNodeID()
-              + ", however opcode is NE. Hence accept this node.");
-        }
+        LOG.debug("Incoming requestAttribute:{} is not present in {},"
+            + " however opcode is NE. Hence accept this node.",
+            requestAttribute, schedulerNode.getNodeID());
         return true;
       }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Incoming requestAttribute:" + requestAttribute
-            + "is not present in " + schedulerNode.getNodeID()
-            + ", skip such node.");
-      }
+      LOG.debug("Incoming requestAttribute:{} is not present in {},"
+          + " skip such node.", requestAttribute, schedulerNode.getNodeID());
       return false;
     }
@@ -183,21 +178,16 @@
       }
       if (requestAttribute.equals(nodeAttribute)) {
        if (isOpCodeMatches(requestAttribute, nodeAttribute, opCode)) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(
-                "Incoming requestAttribute:" + requestAttribute
-                    + " matches with node:" + schedulerNode.getNodeID());
-          }
+          LOG.debug("Incoming requestAttribute:{} matches with node:{}",
+              requestAttribute, schedulerNode.getNodeID());
           found = true;
           return found;
         }
       }
     }
     if (!found) {
-      if (LOG.isDebugEnabled()) {
-        LOG.info("skip this node:" + schedulerNode.getNodeID()
-            + " for requestAttribute:" + requestAttribute);
-      }
+      LOG.debug("skip this node:{} for requestAttribute:{}",
+          schedulerNode.getNodeID(), requestAttribute);
      return false;
    }
    return true;


@@ -426,10 +426,8 @@ public abstract class FSQueue implements Queue, Schedulable {
   */
  boolean assignContainerPreCheck(FSSchedulerNode node) {
    if (node.getReservedContainer() != null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Assigning container failed on node '" + node.getNodeName()
-            + " because it has reserved containers.");
-      }
+      LOG.debug("Assigning container failed on node '{}' because it has"
+          + " reserved containers.", node.getNodeName());
      return false;
    } else if (!Resources.fitsIn(getResourceUsage(), getMaxShare())) {
      if (LOG.isDebugEnabled()) {


@@ -397,9 +397,8 @@ public class FifoScheduler extends
     LOG.info("Accepted application " + applicationId + " from user: " + user
         + ", currently num of applications: " + applications.size());
     if (isAppRecovering) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(applicationId + " is recovering. Skip notifying APP_ACCEPTED");
-      }
+      LOG.debug("{} is recovering. Skip notifying APP_ACCEPTED",
+          applicationId);
     } else {
       rmContext.getDispatcher().getEventHandler()
           .handle(new RMAppEvent(applicationId, RMAppEventType.APP_ACCEPTED));
@@ -429,10 +428,8 @@ public class FifoScheduler extends
     LOG.info("Added Application Attempt " + appAttemptId
         + " to scheduler from user " + application.getUser());
     if (isAttemptRecovering) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(appAttemptId
-            + " is recovering. Skipping notifying ATTEMPT_ADDED");
-      }
+      LOG.debug("{} is recovering. Skipping notifying ATTEMPT_ADDED",
+          appAttemptId);
     } else {
       rmContext.getDispatcher().getEventHandler().handle(
           new RMAppAttemptEvent(appAttemptId,


@@ -396,10 +396,8 @@ public class LocalityAppPlacementAllocator <N extends SchedulerNode>
       SchedulingMode schedulingMode) {
     // We will only look at node label = nodeLabelToLookAt according to
     // schedulingMode and partition of node.
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("precheckNode is invoked for " + schedulerNode.getNodeID() + ","
-          + schedulingMode);
-    }
+    LOG.debug("precheckNode is invoked for {},{}", schedulerNode.getNodeID(),
+        schedulingMode);
     String nodePartitionToLookAt;
     if (schedulingMode == SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) {
       nodePartitionToLookAt = schedulerNode.getPartition();


@@ -223,10 +223,8 @@ public class Application {
     if (requests == null) {
       requests = new HashMap<String, ResourceRequest>();
       this.requests.put(schedulerKey, requests);
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Added priority=" + schedulerKey.getPriority()
-            + " application="+ applicationId);
-      }
+      LOG.debug("Added priority={} application={}", schedulerKey.getPriority(),
+          applicationId);
     }
 
     final Resource capability = requestSpec.get(schedulerKey);
@@ -242,10 +240,7 @@ public class Application {
     LOG.info("Added task " + task.getTaskId() + " to application " +
         applicationId + " at priority " + schedulerKey.getPriority());
 
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("addTask: application=" + applicationId
-          + " #asks=" + ask.size());
-    }
+    LOG.debug("addTask: application={} #asks={}", applicationId, ask.size());
 
     // Create resource requests
     for (String host : task.getHosts()) {
@@ -320,12 +315,12 @@ public class Application {
   public synchronized List<Container> getResources() throws IOException {
     if(LOG.isDebugEnabled()) {
-      LOG.debug("getResources begin:" + " application=" + applicationId
-          + " #ask=" + ask.size());
+      LOG.debug("getResources begin: application={} #ask={}",
+          applicationId, ask.size());
 
       for (ResourceRequest request : ask) {
-        LOG.debug("getResources:" + " application=" + applicationId
-            + " ask-request=" + request);
+        LOG.debug("getResources: application={} ask-request={}",
+            applicationId, request);
       }
     }
@@ -346,8 +341,8 @@ public class Application {
     ask.clear();
 
     if(LOG.isDebugEnabled()) {
-      LOG.debug("getResources() for " + applicationId + ":"
-          + " ask=" + ask.size() + " received=" + containers.size());
+      LOG.debug("getResources() for {}: ask={} received={}",
+          applicationId, ask.size(), containers.size());
     }
 
     return containers;
@@ -451,10 +446,8 @@ public class Application {
     updateResourceRequest(requests.get(ResourceRequest.ANY));
 
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("updateResourceDemands:" + " application=" + applicationId
-          + " #asks=" + ask.size());
-    }
+    LOG.debug("updateResourceDemands: application={} #asks={}",
+        applicationId, ask.size());
   }
 
   private void updateResourceRequest(ResourceRequest request) {


@@ -107,9 +107,7 @@ class FlowScanner implements RegionScanner, Closeable {
           YarnConfiguration.APP_FINAL_VALUE_RETENTION_THRESHOLD,
           YarnConfiguration.DEFAULT_APP_FINAL_VALUE_RETENTION_THRESHOLD);
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(" batch size=" + batchSize);
-    }
+    LOG.debug(" batch size={}", batchSize);
   }