From 9dcb4268197bfeeb1b31647e8ef51ee9a18b6db1 Mon Sep 17 00:00:00 2001
From: Andrew Wang
Date: Mon, 17 Aug 2015 10:16:26 -0700
Subject: [PATCH] HDFS-8713. Convert DatanodeDescriptor to use SLF4J logging.

(cherry picked from commit 3fe758436041d55da1ccde4d5109da38d1f5110f)
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt       |  2 +
 .../blockmanagement/DatanodeDescriptor.java       | 40 ++++++++++---------
 2 files changed, 23 insertions(+), 19 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 596eb95cd75..5770200fc8e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -445,6 +445,8 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8824. Do not use small blocks for balancing the cluster. (szetszwo)
 
+    HDFS-8713. Convert DatanodeDescriptor to use SLF4J logging. (wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index ca5b541fe8e..c7658db5aaa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -33,8 +33,6 @@ import java.util.Set;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableList;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.StorageType;
@@ -50,6 +48,8 @@ import org.apache.hadoop.hdfs.util.EnumCounters;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.util.IntrusiveCollection;
 import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -61,7 +61,8 @@ import com.google.common.annotations.VisibleForTesting;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class DatanodeDescriptor extends DatanodeInfo {
-  public static final Log LOG = LogFactory.getLog(DatanodeDescriptor.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(DatanodeDescriptor.class);
   public static final DatanodeDescriptor[] EMPTY_ARRAY = {};
   // Stores status of decommissioning.
@@ -321,9 +322,9 @@ public class DatanodeDescriptor extends DatanodeInfo {
       Map.Entry<String, DatanodeStorageInfo> entry = iter.next();
       DatanodeStorageInfo storageInfo = entry.getValue();
       if (storageInfo.getLastBlockReportId() != curBlockReportId) {
-        LOG.info(storageInfo.getStorageID() + " had lastBlockReportId 0x" +
-            Long.toHexString(storageInfo.getLastBlockReportId()) +
-            ", but curBlockReportId = 0x" +
+        LOG.info("{} had lastBlockReportId 0x{} but curBlockReportId = 0x{}",
+            storageInfo.getStorageID(),
+            Long.toHexString(storageInfo.getLastBlockReportId()),
             Long.toHexString(curBlockReportId));
         iter.remove();
         if (zombies == null) {
@@ -448,8 +449,10 @@ public class DatanodeDescriptor extends DatanodeInfo {
     }
 
     if (checkFailedStorages) {
-      LOG.info("Number of failed storage changes from "
-          + this.volumeFailures + " to " + volFailures);
+      if (this.volumeFailures != volFailures) {
+        LOG.info("Number of failed storages changes from {} to {}",
+            this.volumeFailures, volFailures);
+      }
       synchronized (storageMap) {
         failedStorageInfos = new HashSet<>(storageMap.values());
@@ -500,10 +503,9 @@ public class DatanodeDescriptor extends DatanodeInfo {
    */
   private void pruneStorageMap(final StorageReport[] reports) {
     synchronized (storageMap) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Number of storages reported in heartbeat=" + reports.length
-            + "; Number of storages in storageMap=" + storageMap.size());
-      }
+      LOG.debug("Number of storages reported in heartbeat={};"
+          + " Number of storages in storageMap={}", reports.length,
+          storageMap.size());
 
       HashMap<String, DatanodeStorageInfo> excessStorages;
 
@@ -520,11 +522,11 @@ public class DatanodeDescriptor extends DatanodeInfo {
       for (final DatanodeStorageInfo storageInfo : excessStorages.values()) {
         if (storageInfo.numBlocks() == 0) {
           storageMap.remove(storageInfo.getStorageID());
-          LOG.info("Removed storage " + storageInfo + " from DataNode" + this);
-        } else if (LOG.isDebugEnabled()) {
+          LOG.info("Removed storage {} from DataNode {}", storageInfo, this);
+        } else {
           // This can occur until all block reports are received.
-          LOG.debug("Deferring removal of stale storage " + storageInfo
-              + " with " + storageInfo.numBlocks() + " blocks");
+          LOG.debug("Deferring removal of stale storage {} with {} blocks",
+              storageInfo, storageInfo.numBlocks());
         }
       }
     }
@@ -534,7 +536,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
       Set<DatanodeStorageInfo> failedStorageInfos) {
     for (DatanodeStorageInfo storageInfo : failedStorageInfos) {
       if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
-        LOG.info(storageInfo + " failed.");
+        LOG.info("{} failed.", storageInfo);
         storageInfo.setState(DatanodeStorage.State.FAILED);
       }
     }
@@ -859,8 +861,8 @@ public class DatanodeDescriptor extends DatanodeInfo {
     synchronized (storageMap) {
       DatanodeStorageInfo storage = storageMap.get(s.getStorageID());
       if (storage == null) {
-        LOG.info("Adding new storage ID " + s.getStorageID() +
-            " for DN " + getXferAddr());
+        LOG.info("Adding new storage ID {} for DN {}", s.getStorageID(),
+            getXferAddr());
         storage = new DatanodeStorageInfo(this, s);
         storageMap.put(s.getStorageID(), storage);
       } else if (storage.getState() != s.getState() ||
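
Background on the pattern this patch adopts: SLF4J substitutes the "{}"
placeholders only when the target log level is enabled, which is why the
hunks above can drop the LOG.isDebugEnabled() guards that commons-logging
code needed around string concatenation. A minimal, self-contained sketch
of the before/after pattern (the StorageReportExample class and its values
are hypothetical, not part of HDFS; it assumes only slf4j-api plus some
binding on the classpath):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class StorageReportExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(StorageReportExample.class);

      public static void main(String[] args) {
        int reported = 4;  // storages in this heartbeat (made-up value)
        int known = 5;     // storages in storageMap (made-up value)

        // commons-logging style: the message string is concatenated
        // eagerly even when DEBUG is disabled, so callers wrap the call
        // in an isDebugEnabled() guard to avoid the wasted work.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Number of storages reported in heartbeat=" + reported
              + "; Number of storages in storageMap=" + known);
        }

        // SLF4J style: formatting is deferred until DEBUG is known to be
        // enabled, so the guard becomes unnecessary for cheap arguments.
        LOG.debug("Number of storages reported in heartbeat={};"
            + " Number of storages in storageMap={}", reported, known);
      }
    }

Note that SLF4J defers only the message formatting: the argument
expressions themselves are still evaluated at the call site, so an
explicit level check remains worthwhile when computing an argument is
expensive.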