HDFS-9402. Switch DataNode.LOG to use slf4j. Contributed by Walter Su.

Haohui Mai committed 2015-11-22 15:54:41 -08:00
parent 4c061e607e
commit 176ff5ce90
12 changed files with 33 additions and 24 deletions

CHANGES.txt

@@ -1666,6 +1666,8 @@ Release 2.8.0 - UNRELEASED
HDFS-9439. Include status of closeAck into exception message in DataNode#run.
(Xiao Chen via Yongjun Zhang)
+HDFS-9402. Switch DataNode.LOG to use slf4j. (Walter Su via wheat9)
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

BPOfferService.java

@@ -22,7 +22,6 @@ import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
-import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
@@ -35,6 +34,8 @@ import org.apache.hadoop.hdfs.server.protocol.*;
import org.apache.hadoop.hdfs.server.protocol.BlockECRecoveryCommand.BlockECRecoveryInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
+import org.slf4j.Logger;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
@@ -54,7 +55,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
*/
@InterfaceAudience.Private
class BPOfferService {
-static final Log LOG = DataNode.LOG;
+static final Logger LOG = DataNode.LOG;
/**
* Information about the namespace that this service
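
The same three-hunk shape repeats through most of the files below: drop the commons-logging import, add the slf4j import, and retype the shared LOG field. Because these helper classes all alias DataNode's logger rather than declaring their own, only the field's type changes. A minimal sketch of the pattern (class names here are illustrative, not from the patch):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// The owning class declares the slf4j logger once.
class Owner {
  public static final Logger LOG = LoggerFactory.getLogger(Owner.class);
}

// Helper classes alias it, so their output appears under the owner's
// logger name. Switching facades only changes the field's type from
// org.apache.commons.logging.Log to org.slf4j.Logger.
class Helper {
  static final Logger LOG = Owner.LOG;

  void doWork() {
    // Parameterized messages skip string building when DEBUG is off.
    LOG.debug("doing work in {}", getClass().getSimpleName());
  }
}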

BPServiceActor.java

@@ -32,7 +32,6 @@ import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicBoolean;
import com.google.common.base.Joiner;
-import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.client.BlockReportOptions;
@@ -65,6 +64,7 @@ import org.apache.hadoop.util.VersionUtil;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Maps;
+import org.slf4j.Logger;
/**
* A thread per active or standby namenode to perform:
@@ -78,7 +78,7 @@ import com.google.common.collect.Maps;
@InterfaceAudience.Private
class BPServiceActor implements Runnable {
-static final Log LOG = DataNode.LOG;
+static final Logger LOG = DataNode.LOG;
final InetSocketAddress nnAddr;
HAServiceState state;
@@ -600,7 +600,7 @@ class BPServiceActor implements Runnable {
private synchronized void cleanUp() {
shouldServiceRun = false;
-IOUtils.cleanup(LOG, bpNamenode);
+IOUtils.cleanup(null, bpNamenode);
bpos.shutdownActor(this);
}
@@ -834,7 +834,7 @@ class BPServiceActor implements Runnable {
sleepAndLogInterrupts(5000, "initializing");
} else {
runningState = RunningState.FAILED;
-LOG.fatal("Initialization failed for " + this + ". Exiting. ", ioe);
+LOG.error("Initialization failed for " + this + ". Exiting. ", ioe);
return;
}
}
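
Two of the hunks above fall out of slf4j's narrower API rather than style: org.slf4j.Logger has no fatal() method, so LOG.fatal(...) becomes LOG.error(...), and Hadoop's IOUtils.cleanup(Log, Closeable...) is still typed against commons-logging, so a caller that now holds an slf4j Logger passes null, which merely silences the close-failure debug message. A hedged sketch of both (the class and method names are hypothetical):

import java.io.Closeable;
import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class ActorShutdownSketch {
  // slf4j tops out at error(); there is no fatal(), hence the downgrade.
  static final Logger LOG = LoggerFactory.getLogger(ActorShutdownSketch.class);

  void cleanUp(Closeable bpNamenode) {
    // cleanup() wants an org.apache.commons.logging.Log; with null it
    // still closes everything, just without logging close failures.
    IOUtils.cleanup(null, bpNamenode);
  }

  void reportFatalInit(Exception ioe) {
    LOG.error("Initialization failed. Exiting.", ioe);
  }
}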

BlockPoolManager.java

@@ -23,7 +23,6 @@ import java.security.PrivilegedExceptionAction;
import java.util.*;
import java.util.concurrent.CopyOnWriteArrayList;
-import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -35,6 +34,7 @@ import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
+import org.slf4j.Logger;
/**
* Manages the BPOfferService objects for the data node.
@@ -43,7 +43,7 @@ import com.google.common.collect.Sets;
*/
@InterfaceAudience.Private
class BlockPoolManager {
-private static final Log LOG = DataNode.LOG;
+private static final Logger LOG = DataNode.LOG;
private final Map<String, BPOfferService> bpByNameserviceId =
Maps.newHashMap();

BlockReceiver.java

@@ -63,13 +63,14 @@ import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_DONTNEED;
import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.SYNC_FILE_RANGE_WRITE;
import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
/** A class that receives a block and writes to its own disk, meanwhile
* may copies it to another site. If a throttler is provided,
* streaming throttling is also supported.
**/
class BlockReceiver implements Closeable {
-public static final Log LOG = DataNode.LOG;
+public static final Logger LOG = DataNode.LOG;
static final Log ClientTraceLog = DataNode.ClientTraceLog;
@VisibleForTesting
@@ -960,7 +961,7 @@ class BlockReceiver implements Closeable {
// The worst case is not recovering this RBW replica.
// Client will fall back to regular pipeline recovery.
} finally {
-IOUtils.cleanup(LOG, out);
+IOUtils.closeStream(out);
}
try {
// Even if the connection is closed after the ack packet is
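
Here the patch uses the second escape hatch: for a single stream, IOUtils.closeStream(out) is equivalent to cleanup(null, out), closing quietly and tolerating null, so no logger of either type is needed. A small illustration (the stream argument is hypothetical):

import java.io.OutputStream;
import org.apache.hadoop.io.IOUtils;

class QuietCloseSketch {
  void finish(OutputStream out) {
    // Equivalent to IOUtils.cleanup(null, out): close the stream,
    // swallow any IOException, and do nothing if out is null.
    IOUtils.closeStream(out);
  }
}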

BlockRecoveryWorker.java

@@ -18,7 +18,6 @@
package org.apache.hadoop.hdfs.server.datanode;
import com.google.common.base.Joiner;
-import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -32,6 +31,7 @@ import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.util.Daemon;
+import org.slf4j.Logger;
import java.io.IOException;
import java.util.ArrayList;
@@ -44,7 +44,7 @@ import java.util.List;
*/
@InterfaceAudience.Private
public class BlockRecoveryWorker {
-public static final Log LOG = DataNode.LOG;
+public static final Logger LOG = DataNode.LOG;
private final DataNode datanode;
private final Configuration conf;

BlockSender.java

@@ -55,6 +55,7 @@ import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_SEQUENTIAL
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
/**
* Reads a block from the disk and sends it to a recipient.
@@ -97,7 +98,7 @@ import com.google.common.base.Preconditions;
* no checksum error, it replies to DataNode with OP_STATUS_CHECKSUM_OK.
*/
class BlockSender implements java.io.Closeable {
-static final Log LOG = DataNode.LOG;
+static final Logger LOG = DataNode.LOG;
static final Log ClientTraceLog = DataNode.ClientTraceLog;
private static final boolean is32Bit =
System.getProperty("sun.arch.data.model").equals("32");

DataNode.java

@@ -212,6 +212,8 @@ import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.collect.Lists;
import com.google.protobuf.BlockingService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**********************************************************
* DataNode is a class (and program) that stores a set of
@@ -248,7 +250,7 @@ import com.google.protobuf.BlockingService;
public class DataNode extends ReconfigurableBase
implements InterDatanodeProtocol, ClientDatanodeProtocol,
TraceAdminProtocol, DataNodeMXBean {
-public static final Log LOG = LogFactory.getLog(DataNode.class);
+public static final Logger LOG = LoggerFactory.getLogger(DataNode.class);
static{
HdfsConfiguration.init();
@@ -2605,7 +2607,7 @@ public class DataNode extends ReconfigurableBase
errorCode = 1;
}
} catch (Throwable e) {
-LOG.fatal("Exception in secureMain", e);
+LOG.error("Exception in secureMain", e);
terminate(1, e);
} finally {
// We need to terminate the process here because either shutdown was called
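
This declaration is the root of the whole patch: every other class aliases DataNode.LOG, so retyping this one field from Log to Logger retypes them all, and the second hunk again trades fatal() for error(). The before/after of the declaration, reduced to a compilable sketch (the class name is a stand-in for DataNode):

// Before (commons-logging):
//   import org.apache.commons.logging.Log;
//   import org.apache.commons.logging.LogFactory;
//   public static final Log LOG = LogFactory.getLog(DataNode.class);

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class DataNodeSketch {
  // After (slf4j): the logger name is resolved from the class the
  // same way, only the facade changes.
  public static final Logger LOG =
      LoggerFactory.getLogger(DataNodeSketch.class);
}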

DataXceiver.java

@@ -89,13 +89,14 @@ import org.apache.hadoop.util.DataChecksum;
import com.google.common.base.Preconditions;
import com.google.protobuf.ByteString;
import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
/**
* Thread for processing incoming/outgoing data stream.
*/
class DataXceiver extends Receiver implements Runnable {
-public static final Log LOG = DataNode.LOG;
+public static final Logger LOG = DataNode.LOG;
static final Log ClientTraceLog = DataNode.ClientTraceLog;
private Peer peer;
@@ -376,7 +377,7 @@ class DataXceiver extends Receiver implements Runnable {
blk.getBlockId(), dnR.getDatanodeUuid(), success));
}
if (fis != null) {
-IOUtils.cleanup(LOG, fis);
+IOUtils.cleanup(null, fis);
}
}
}

DataXceiverServer.java

@@ -22,7 +22,6 @@ import java.net.SocketTimeoutException;
import java.nio.channels.AsynchronousCloseException;
import java.util.HashMap;
-import org.apache.commons.logging.Log;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.net.Peer;
@@ -32,6 +31,7 @@ import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Daemon;
import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
/**
* Server used for receiving/sending a block of data.
@@ -40,7 +40,7 @@ import com.google.common.annotations.VisibleForTesting;
* Hadoop IPC mechanism.
*/
class DataXceiverServer implements Runnable {
-public static final Log LOG = DataNode.LOG;
+public static final Logger LOG = DataNode.LOG;
private final PeerServer peerServer;
private final DataNode datanode;
@@ -262,7 +262,7 @@ class DataXceiverServer implements Runnable {
synchronized void closeAllPeers() {
LOG.info("Closing all peers.");
for (Peer p : peers.keySet()) {
-IOUtils.cleanup(LOG, p);
+IOUtils.cleanup(null, p);
}
peers.clear();
peersXceiver.clear();

ErasureCodingWorker.java

@@ -44,7 +44,6 @@ import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
@@ -78,6 +77,7 @@ import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.DataChecksum;
import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
/**
* ErasureCodingWorker handles the erasure coding recovery work commands. These
@@ -87,7 +87,7 @@ import com.google.common.base.Preconditions;
*/
@InterfaceAudience.Private
public final class ErasureCodingWorker {
-private static final Log LOG = DataNode.LOG;
+private static final Logger LOG = DataNode.LOG;
private final DataNode datanode;
private final Configuration conf;
@@ -837,7 +837,7 @@ public final class ErasureCodingWorker {
return peer;
} finally {
if (!success) {
-IOUtils.cleanup(LOG, peer);
+IOUtils.cleanup(null, peer);
IOUtils.closeSocket(sock);
}
}

TestDistCh.java

@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.mapred.MiniMRClientClusterFactory;
+import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
import org.apache.log4j.Level;
@@ -45,7 +46,7 @@ public class TestDistCh extends junit.framework.TestCase {
{
((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.StateChange")
).getLogger().setLevel(Level.ERROR);
-((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ERROR);
+GenericTestUtils.setLogLevel(DataNode.LOG, Level.ERROR);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ERROR);
}
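
With DataNode.LOG now an slf4j Logger, the raw cast to Log4JLogger no longer compiles, so the test goes through GenericTestUtils.setLogLevel, which is overloaded for both facades and reaches the underlying log4j logger by name. Usage, assuming log4j remains the backing implementation as in Hadoop's test runs:

import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;

class LogLevelSketch {
  static void quietDataNode() {
    // Works for a commons-logging Log and an slf4j Logger alike; the
    // helper looks up the log4j logger behind the facade.
    GenericTestUtils.setLogLevel(DataNode.LOG, Level.ERROR);
  }
}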