HDFS-8713. Convert DatanodeDescriptor to use SLF4J logging.

(cherry picked from commit 3fe758436041d55da1ccde4d5109da38d1f5110f)
Author: Andrew Wang
Date:   2015-08-17 10:16:26 -07:00
Parent: d2ff763533
Commit: 9dcb426819

2 changed files with 23 additions and 19 deletions

CHANGES.txt

@@ -445,6 +445,8 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8824. Do not use small blocks for balancing the cluster. (szetszwo)
 
+    HDFS-8713. Convert DatanodeDescriptor to use SLF4J logging. (wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

DatanodeDescriptor.java

@@ -33,8 +33,6 @@ import java.util.Set;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableList;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.StorageType;
@@ -50,6 +48,8 @@ import org.apache.hadoop.hdfs.util.EnumCounters;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.util.IntrusiveCollection;
 import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import com.google.common.annotations.VisibleForTesting;
@@ -61,7 +61,8 @@ import com.google.common.annotations.VisibleForTesting;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class DatanodeDescriptor extends DatanodeInfo {
-  public static final Log LOG = LogFactory.getLog(DatanodeDescriptor.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(DatanodeDescriptor.class);
   public static final DatanodeDescriptor[] EMPTY_ARRAY = {};
 
   // Stores status of decommissioning.
@@ -321,9 +322,9 @@
       Map.Entry<String, DatanodeStorageInfo> entry = iter.next();
       DatanodeStorageInfo storageInfo = entry.getValue();
       if (storageInfo.getLastBlockReportId() != curBlockReportId) {
-        LOG.info(storageInfo.getStorageID() + " had lastBlockReportId 0x" +
-            Long.toHexString(storageInfo.getLastBlockReportId()) +
-            ", but curBlockReportId = 0x" +
+        LOG.info("{} had lastBlockReportId 0x{} but curBlockReportId = 0x{}",
+            storageInfo.getStorageID(),
+            Long.toHexString(storageInfo.getLastBlockReportId()),
             Long.toHexString(curBlockReportId));
         iter.remove();
         if (zombies == null) {
@ -448,8 +449,10 @@ public class DatanodeDescriptor extends DatanodeInfo {
} }
if (checkFailedStorages) { if (checkFailedStorages) {
LOG.info("Number of failed storage changes from " if (this.volumeFailures != volFailures) {
+ this.volumeFailures + " to " + volFailures); LOG.info("Number of failed storages changes from {} to {}",
this.volumeFailures, volFailures);
}
synchronized (storageMap) { synchronized (storageMap) {
failedStorageInfos = failedStorageInfos =
new HashSet<>(storageMap.values()); new HashSet<>(storageMap.values());
@@ -500,10 +503,9 @@
    */
   private void pruneStorageMap(final StorageReport[] reports) {
     synchronized (storageMap) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Number of storages reported in heartbeat=" + reports.length
-            + "; Number of storages in storageMap=" + storageMap.size());
-      }
+      LOG.debug("Number of storages reported in heartbeat={};"
+          + " Number of storages in storageMap={}", reports.length,
+          storageMap.size());
 
       HashMap<String, DatanodeStorageInfo> excessStorages;
@@ -520,11 +522,11 @@
       for (final DatanodeStorageInfo storageInfo : excessStorages.values()) {
         if (storageInfo.numBlocks() == 0) {
           storageMap.remove(storageInfo.getStorageID());
-          LOG.info("Removed storage " + storageInfo + " from DataNode" + this);
-        } else if (LOG.isDebugEnabled()) {
+          LOG.info("Removed storage {} from DataNode {}", storageInfo, this);
+        } else {
           // This can occur until all block reports are received.
-          LOG.debug("Deferring removal of stale storage " + storageInfo
-              + " with " + storageInfo.numBlocks() + " blocks");
+          LOG.debug("Deferring removal of stale storage {} with {} blocks",
+              storageInfo, storageInfo.numBlocks());
         }
       }
     }
@@ -534,7 +536,7 @@
       Set<DatanodeStorageInfo> failedStorageInfos) {
     for (DatanodeStorageInfo storageInfo : failedStorageInfos) {
       if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
-        LOG.info(storageInfo + " failed.");
+        LOG.info("{} failed.", storageInfo);
         storageInfo.setState(DatanodeStorage.State.FAILED);
       }
     }
@@ -859,8 +861,8 @@
     synchronized (storageMap) {
       DatanodeStorageInfo storage = storageMap.get(s.getStorageID());
       if (storage == null) {
-        LOG.info("Adding new storage ID " + s.getStorageID() +
-            " for DN " + getXferAddr());
+        LOG.info("Adding new storage ID {} for DN {}", s.getStorageID(),
+            getXferAddr());
         storage = new DatanodeStorageInfo(this, s);
         storageMap.put(s.getStorageID(), storage);
       } else if (storage.getState() != s.getState() ||
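
For reference, the sketch below distills the pattern the hunks above apply: commons-logging string concatenation becomes SLF4J parameterized logging, which also lets explicit LOG.isDebugEnabled() guards around cheap messages be dropped. It is a standalone illustration, not part of the patch; the class name Slf4jConversionDemo and its sample values are made up, and running it requires slf4j-api plus a binding such as slf4j-simple on the classpath.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class Slf4jConversionDemo {
  // Same declaration shape as the converted DatanodeDescriptor.LOG field.
  private static final Logger LOG =
      LoggerFactory.getLogger(Slf4jConversionDemo.class);

  public static void main(String[] args) {
    String storageId = "DS-example-1234"; // made-up sample value
    long reportId = 0xabcdL;              // made-up sample value

    // Before (commons-logging): the message string is concatenated eagerly,
    // even when INFO is disabled:
    //   LOG.info(storageId + " had lastBlockReportId 0x"
    //       + Long.toHexString(reportId));

    // After (SLF4J): {} placeholders are substituted only when the message
    // is actually logged, so no string is built for disabled levels.
    LOG.info("{} had lastBlockReportId 0x{}", storageId,
        Long.toHexString(reportId));

    // Debug calls no longer need an isDebugEnabled() wrapper when the
    // arguments are cheap to produce, as in the pruneStorageMap hunk above.
    LOG.debug("Number of storages reported in heartbeat={}", 1);
  }
}

Note that argument expressions are still evaluated before the call; SLF4J only defers rendering the format string. The guards removed in this patch wrapped cheap getters and counters, so dropping them is safe, while an isDebugEnabled() check would remain worthwhile for genuinely expensive arguments.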