HDFS-7978. Add LOG.isDebugEnabled() guard for some LOG.debug(..). Contributed by Walter Su.

commit ed72daa5df
parent a3a96a07fa
Author: Andrew Wang
Date:   2015-04-01 12:53:25 -07:00
12 changed files with 78 additions and 43 deletions
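
Every change in this commit addresses the same problem: a commons-logging LOG.debug(..) call builds its message string eagerly, whether or not debug output is enabled. Where the logger is commons-logging, the patch wraps the call in a LOG.isDebugEnabled() guard. A minimal sketch of that pattern (the class and method names here are illustrative, not part of the patch):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class GuardedDebugSketch {
  private static final Log LOG = LogFactory.getLog(GuardedDebugSketch.class);

  void reportCacheCost(int blocks, long createCost, long sendCost) {
    // Without the guard, the message string is concatenated on every call,
    // even when the logger discards it. With the guard, the debug-disabled
    // case costs a single boolean check.
    if (LOG.isDebugEnabled()) {
      LOG.debug("CacheReport of " + blocks + " block(s) took " + createCost
          + " msec to generate and " + sendCost + " msecs for RPC processing");
    }
  }
}

The BlockReaderFactory hunks below take the opposite approach, switching to SLF4J parameterized messages so no guard is needed.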

CHANGES.txt

@@ -373,6 +373,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8009. Signal congestion on the DataNode. (wheat9)
 
+    HDFS-7978. Add LOG.isDebugEnabled() guard for some LOG.debug(..).
+    (Walter Su via wang)
+
   OPTIMIZATIONS
 
   BUG FIXES

BlockReaderFactory.java

@@ -369,9 +369,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
       return null;
     }
     if (clientContext.getDisableLegacyBlockReaderLocal()) {
-      PerformanceAdvisory.LOG.debug(this + ": can't construct " +
-          "BlockReaderLocalLegacy because " +
-          "disableLegacyBlockReaderLocal is set.");
+      PerformanceAdvisory.LOG.debug("{}: can't construct " +
+          "BlockReaderLocalLegacy because " +
+          "disableLegacyBlockReaderLocal is set.", this);
       return null;
     }
     IOException ioe = null;
@@ -410,8 +410,8 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
           getPathInfo(inetSocketAddress, conf);
     }
     if (!pathInfo.getPathState().getUsableForShortCircuit()) {
-      PerformanceAdvisory.LOG.debug(this + ": " + pathInfo + " is not " +
-          "usable for short circuit; giving up on BlockReaderLocal.");
+      PerformanceAdvisory.LOG.debug("{}: {} is not usable for short circuit; " +
+          "giving up on BlockReaderLocal.", this, pathInfo);
       return null;
     }
     ShortCircuitCache cache = clientContext.getShortCircuitCache();
@@ -426,11 +426,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
       throw exc;
     }
     if (info.getReplica() == null) {
-      if (LOG.isTraceEnabled()) {
-        PerformanceAdvisory.LOG.debug(this + ": failed to get " +
-            "ShortCircuitReplica. Cannot construct " +
-            "BlockReaderLocal via " + pathInfo.getPath());
-      }
+      PerformanceAdvisory.LOG.debug("{}: failed to get " +
+          "ShortCircuitReplica. Cannot construct " +
+          "BlockReaderLocal via {}", this, pathInfo.getPath());
       return null;
     }
     return new BlockReaderLocal.Builder(conf).
@@ -610,9 +608,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
           getPathInfo(inetSocketAddress, conf);
     }
     if (!pathInfo.getPathState().getUsableForDataTransfer()) {
-      PerformanceAdvisory.LOG.debug(this + ": not trying to create a " +
-          "remote block reader because the UNIX domain socket at " +
-          pathInfo + " is not usable.");
+      PerformanceAdvisory.LOG.debug("{}: not trying to create a " +
+          "remote block reader because the UNIX domain socket at {}" +
+          " is not usable.", this, pathInfo);
       return null;
     }
     if (LOG.isTraceEnabled()) {
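
These BlockReaderFactory hunks drop the guards instead: PerformanceAdvisory.LOG is an SLF4J logger, and SLF4J expands {} placeholders only after its own level check, so no explicit guard is needed. (The third hunk also removes a mismatched isTraceEnabled() guard around a debug-level call.) A minimal sketch of the parameterized style, with illustrative names:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ParameterizedDebugSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(ParameterizedDebugSketch.class);

  void rejectShortCircuit(Object reader, Object pathInfo) {
    // SLF4J fills in the {} placeholders only when debug is enabled, so the
    // message string and the arguments' toString() calls are skipped when it
    // is off. Computing the arguments themselves still happens eagerly.
    LOG.debug("{}: {} is not usable for short circuit; giving up.",
        reader, pathInfo);
  }
}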

HAUtil.java

@@ -319,14 +319,18 @@ public class HAUtil {
             buildTokenServicePrefixForLogicalUri(HdfsConstants.HDFS_URI_SCHEME)
                 + "//" + specificToken.getService());
         ugi.addToken(alias, specificToken);
-        LOG.debug("Mapped HA service delegation token for logical URI " +
-            haUri + " to namenode " + singleNNAddr);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Mapped HA service delegation token for logical URI " +
+              haUri + " to namenode " + singleNNAddr);
+        }
       }
     } else {
-      LOG.debug("No HA service delegation token found for logical URI " +
-          haUri);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("No HA service delegation token found for logical URI " +
+            haUri);
+      }
     }
   }
 
   /**
    * Get the internet address of the currently-active NN. This should rarely be

BPServiceActor.java

@@ -580,10 +580,12 @@ class BPServiceActor implements Runnable {
       long createCost = createTime - startTime;
       long sendCost = sendTime - createTime;
       dn.getMetrics().addCacheReport(sendCost);
-      LOG.debug("CacheReport of " + blockIds.size()
-          + " block(s) took " + createCost + " msec to generate and "
-          + sendCost + " msecs for RPC and NN processing");
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("CacheReport of " + blockIds.size()
+            + " block(s) took " + createCost + " msec to generate and "
+            + sendCost + " msecs for RPC and NN processing");
+      }
     }
     return cmd;
   }

FsDatasetCache.java

@@ -319,9 +319,11 @@ public class FsDatasetCache {
     mappableBlockMap.put(key,
         new Value(prevValue.mappableBlock, State.UNCACHING));
     if (deferred) {
-      LOG.debug("{} is anchored, and can't be uncached now. Scheduling it " +
-          "for uncaching in {} ",
-          key, DurationFormatUtils.formatDurationHMS(revocationPollingMs));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("{} is anchored, and can't be uncached now. Scheduling it " +
+            "for uncaching in {} ",
+            key, DurationFormatUtils.formatDurationHMS(revocationPollingMs));
+      }
       deferredUncachingExecutor.schedule(
           new UncachingTask(key, revocationMs),
           revocationPollingMs, TimeUnit.MILLISECONDS);

FileJournalManager.java

@@ -320,9 +320,11 @@ public class FileJournalManager implements JournalManager {
       Collection<EditLogInputStream> streams, long fromTxId,
       boolean inProgressOk) throws IOException {
     List<EditLogFile> elfs = matchEditLogs(sd.getCurrentDir());
-    LOG.debug(this + ": selecting input streams starting at " + fromTxId +
-        (inProgressOk ? " (inProgress ok) " : " (excluding inProgress) ") +
-        "from among " + elfs.size() + " candidate file(s)");
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(this + ": selecting input streams starting at " + fromTxId +
+          (inProgressOk ? " (inProgress ok) " : " (excluding inProgress) ") +
+          "from among " + elfs.size() + " candidate file(s)");
+    }
     addStreamsToCollectionFromFiles(elfs, streams, fromTxId, inProgressOk);
   }
@@ -331,8 +333,10 @@ public class FileJournalManager implements JournalManager {
     for (EditLogFile elf : elfs) {
       if (elf.isInProgress()) {
         if (!inProgressOk) {
-          LOG.debug("passing over " + elf + " because it is in progress " +
-              "and we are ignoring in-progress logs.");
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("passing over " + elf + " because it is in progress " +
+                "and we are ignoring in-progress logs.");
+          }
           continue;
         }
         try {
@@ -345,9 +349,11 @@ public class FileJournalManager implements JournalManager {
       }
       if (elf.lastTxId < fromTxId) {
         assert elf.lastTxId != HdfsConstants.INVALID_TXID;
-        LOG.debug("passing over " + elf + " because it ends at " +
-            elf.lastTxId + ", but we only care about transactions " +
-            "as new as " + fromTxId);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("passing over " + elf + " because it ends at " +
+              elf.lastTxId + ", but we only care about transactions " +
+              "as new as " + fromTxId);
+        }
         continue;
       }
       EditLogFileInputStream elfis = new EditLogFileInputStream(elf.getFile(),

NameNode.java

@@ -1534,9 +1534,11 @@ public class NameNode implements NameNodeStatusMXBean {
       URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
           + conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
       conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
-      LOG.debug("Setting " + FS_DEFAULT_NAME_KEY + " to " + defaultUri.toString());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Setting " + FS_DEFAULT_NAME_KEY + " to " + defaultUri.toString());
+      }
     }
   }
 
   /**
    * Get the name service Id for the node

ShortCircuitCache.java

@@ -446,7 +446,9 @@ public class ShortCircuitCache implements Closeable {
       purgeReason = "purging replica because it is stale.";
     }
     if (purgeReason != null) {
-      LOG.debug(this + ": " + purgeReason);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(this + ": " + purgeReason);
+      }
       purge(replica);
     }
   }

FSImageLoader.java

@@ -147,8 +147,10 @@ class FSImageLoader {
           summary.getCodec(), new BufferedInputStream(new LimitInputStream(
               fin, s.getLength())));
-      LOG.debug("Loading section " + s.getName() + " length: " + s.getLength
-          ());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Loading section " + s.getName() + " length: " + s.getLength
+            ());
+      }
       switch (FSImageFormatProtobuf.SectionName.fromString(s.getName())) {
       case STRING_TABLE:
         stringTable = loadStringTable(is);

LightWeightHashSet.java

@@ -126,9 +126,11 @@ public class LightWeightHashSet<T> implements Collection<T> {
     this.shrinkThreshold = (int) (capacity * minLoadFactor);
     entries = new LinkedElement[capacity];
-    LOG.debug("initial capacity=" + initialCapacity + ", max load factor= "
-        + maxLoadFactor + ", min load factor= " + minLoadFactor);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("initial capacity=" + initialCapacity + ", max load factor= "
+          + maxLoadFactor + ", min load factor= " + minLoadFactor);
+    }
   }
 
   public LightWeightHashSet() {
     this(MINIMUM_CAPACITY, DEFAULT_MAX_LOAD_FACTOR, DEFAUT_MIN_LOAD_FACTOR);

TokenAspect.java

@@ -136,8 +136,10 @@ final class TokenAspect<T extends FileSystem & Renewable> {
       if (token != null) {
         fs.setDelegationToken(token);
         addRenewAction(fs);
-        LOG.debug("Created new DT for " + token.getService());
+        if(LOG.isDebugEnabled()) {
+          LOG.debug("Created new DT for " + token.getService());
+        }
       }
       hasInitedToken = true;
     }
   }
@@ -149,7 +151,9 @@ final class TokenAspect<T extends FileSystem & Renewable> {
   synchronized void initDelegationToken(UserGroupInformation ugi) {
     Token<?> token = selectDelegationToken(ugi);
     if (token != null) {
-      LOG.debug("Found existing DT for " + token.getService());
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Found existing DT for " + token.getService());
+      }
       fs.setDelegationToken(token);
       hasInitedToken = true;
     }

WebHdfsFileSystem.java

@@ -224,12 +224,16 @@ public class WebHdfsFileSystem extends FileSystem
         // refetch tokens. even if ugi has credentials, don't attempt
         // to get another token to match hdfs/rpc behavior
         if (token != null) {
-          LOG.debug("Using UGI token: " + token);
+          if(LOG.isDebugEnabled()) {
+            LOG.debug("Using UGI token: " + token);
+          }
           canRefreshDelegationToken = false;
         } else {
           token = getDelegationToken(null);
           if (token != null) {
-            LOG.debug("Fetched new token: " + token);
+            if(LOG.isDebugEnabled()) {
+              LOG.debug("Fetched new token: " + token);
+            }
           } else { // security is disabled
             canRefreshDelegationToken = false;
           }
@@ -244,7 +248,9 @@ public class WebHdfsFileSystem extends FileSystem
     boolean replaced = false;
     if (canRefreshDelegationToken) {
       Token<?> token = getDelegationToken(null);
-      LOG.debug("Replaced expired token: " + token);
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Replaced expired token: " + token);
+      }
       setDelegationToken(token);
       replaced = (token != null);
     }
@@ -1188,7 +1194,9 @@ public class WebHdfsFileSystem extends FileSystem
         cancelDelegationToken(delegationToken);
       }
     } catch (IOException ioe) {
-      LOG.debug("Token cancel failed: " + ioe);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Token cancel failed: " + ioe);
+      }
     } finally {
       super.close();
     }