HDFS-12804. Use slf4j instead of log4j in FSEditLog. Contributed by Mukul Kumar Singh.

Chen Liang 2017-11-20 12:49:53 -08:00
parent 0d781dd03b
commit 60fc2a1388
5 changed files with 24 additions and 25 deletions
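The change follows the usual commons-logging-to-slf4j migration pattern: the Log/LogFactory pair becomes Logger/LoggerFactory, call sites that passed arbitrary objects now pass strings (slf4j's info() takes a String, not a StringBuilder or Throwable), and fatal() drops to error() because slf4j has no FATAL level. A minimal sketch of the before/after pattern; MyClass is a hypothetical stand-in for FSEditLog:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class MyClass {
  // Before (commons-logging):
  //   public static final Log LOG = LogFactory.getLog(MyClass.class);
  public static final Logger LOG = LoggerFactory.getLogger(MyClass.class);

  void example(long txid, long synctxid) {
    StringBuilder buf = new StringBuilder("Number of syncs: 42");
    // slf4j's info(String) does not accept an arbitrary Object, so the
    // StringBuilder must be converted explicitly.
    LOG.info(buf.toString());

    // slf4j has no fatal(); fatal(msg, t) call sites map to error(msg, t).
    String msg = "Could not sync enough journals to persistent storage. "
        + "Unsynced transactions: " + (txid - synctxid);
    LOG.error(msg, new Exception());

    // error(Object) is also gone; a Throwable needs an accompanying message.
    try {
      throw new java.io.IOException("no streams");
    } catch (java.io.IOException e) {
      LOG.error("Exception while selecting input streams", e);
    }
  }
}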

FSEditLog.java

@@ -29,8 +29,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicLong;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -114,6 +112,8 @@ import org.apache.hadoop.security.token.delegation.DelegationKey;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * FSEditLog maintains a log of the namespace modifications.
@@ -122,9 +122,7 @@ import com.google.common.collect.Lists;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class FSEditLog implements LogsPurgeable {
-
-  public static final Log LOG = LogFactory.getLog(FSEditLog.class);
-
+  public static final Logger LOG = LoggerFactory.getLogger(FSEditLog.class);
   /**
    * State machine for edit log.
    *
@@ -329,7 +327,8 @@ public class FSEditLog implements LogsPurgeable {
       String error = String.format("Cannot start writing at txid %s " +
         "when there is a stream available for read: %s",
         segmentTxId, streams.get(0));
-      IOUtils.cleanup(LOG, streams.toArray(new EditLogInputStream[0]));
+      IOUtils.cleanupWithLogger(LOG,
+          streams.toArray(new EditLogInputStream[0]));
       throw new IllegalStateException(error);
     }
 
@@ -689,9 +688,9 @@ public class FSEditLog implements LogsPurgeable {
             "Could not sync enough journals to persistent storage " +
             "due to " + e.getMessage() + ". " +
             "Unsynced transactions: " + (txid - synctxid);
-        LOG.fatal(msg, new Exception());
+        LOG.error(msg, new Exception());
         synchronized(journalSetLock) {
-          IOUtils.cleanup(LOG, journalSet);
+          IOUtils.cleanupWithLogger(LOG, journalSet);
         }
         terminate(1, msg);
       }
@@ -715,9 +714,9 @@ public class FSEditLog implements LogsPurgeable {
         final String msg =
            "Could not sync enough journals to persistent storage. "
            + "Unsynced transactions: " + (txid - synctxid);
-        LOG.fatal(msg, new Exception());
+        LOG.error(msg, new Exception());
         synchronized(journalSetLock) {
-          IOUtils.cleanup(LOG, journalSet);
+          IOUtils.cleanupWithLogger(LOG, journalSet);
        }
        terminate(1, msg);
      }
@@ -772,7 +771,7 @@ public class FSEditLog implements LogsPurgeable {
     buf.append(editLogStream.getNumSync());
     buf.append(" SyncTimes(ms): ");
     buf.append(journalSet.getSyncTimes());
-    LOG.info(buf);
+    LOG.info(buf.toString());
   }
 
   /** Record the RPC IDs if necessary */
@@ -1711,7 +1710,7 @@ public class FSEditLog implements LogsPurgeable {
       if (recovery != null) {
         // If recovery mode is enabled, continue loading even if we know we
         // can't load up to toAtLeastTxId.
-        LOG.error(e);
+        LOG.error("Exception while selecting input streams", e);
       } else {
         closeAllStreams(streams);
         throw e;
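The IOUtils.cleanup(Log, Closeable...) overload is tied to commons-logging, so the call sites above move to cleanupWithLogger, which takes an slf4j Logger and likewise logs (rather than propagates) failures while closing. A short sketch of equivalent usage; the helper class and method names here are illustrative:

import java.io.Closeable;
import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class CleanupExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(CleanupExample.class);

  static void closeQuietly(Closeable... streams) {
    // Closes each stream in turn, logging any IOException instead of
    // throwing it; passing null for the logger suppresses the messages.
    IOUtils.cleanupWithLogger(LOG, streams);
  }
}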

TestEditLog.java

@@ -130,7 +130,7 @@ public class TestEditLog {
   /**
    * A garbage mkdir op which is used for testing
-   * {@link EditLogFileInputStream#scanEditLog(File)}
+   * {@link EditLogFileInputStream#scanEditLog(File, long, boolean)}
    */
   public static class GarbageMkdirOp extends FSEditLogOp {
     public GarbageMkdirOp() {
@@ -1141,7 +1141,7 @@ public class TestEditLog {
     /**
      * Construct the failure specification.
      * @param roll number to fail after. e.g. 1 to fail after the first roll
-     * @param loginfo index of journal to fail.
+     * @param logindex index of journal to fail.
      */
     AbortSpec(int roll, int logindex) {
       this.roll = roll;

TestEditLogAutoroll.java

@@ -27,8 +27,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -38,7 +36,9 @@ import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NameNodeEditLogRoller;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Level;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -51,7 +51,7 @@ import com.google.common.base.Supplier;
 @RunWith(Parameterized.class)
 public class TestEditLogAutoroll {
   static {
-    GenericTestUtils.setLogLevel(FSEditLog.LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(FSEditLog.LOG, Level.DEBUG);
   }
 
   @Parameters
@@ -74,7 +74,7 @@ public class TestEditLogAutoroll {
   private FSEditLog editLog;
   private final Random random = new Random();
-  private static final Log LOG = LogFactory.getLog(TestEditLog.class);
+  public static final Logger LOG = LoggerFactory.getLogger(FSEditLog.class);
 
   @Before
   public void setUp() throws Exception {

TestEditLogRace.java

@@ -52,7 +52,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
-import org.apache.log4j.Level;
+import org.slf4j.event.Level;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -67,7 +67,7 @@ import org.mockito.stubbing.Answer;
 @RunWith(Parameterized.class)
 public class TestEditLogRace {
   static {
-    GenericTestUtils.setLogLevel(FSEditLog.LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(FSEditLog.LOG, Level.DEBUG);
   }
 
   @Parameters

TestEditLogTailer.java

@@ -60,7 +60,7 @@ import org.mockito.Mockito;
 @RunWith(Parameterized.class)
 public class TestEditLogTailer {
   static {
-    GenericTestUtils.setLogLevel(FSEditLog.LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(FSEditLog.LOG, org.slf4j.event.Level.DEBUG);
   }
 
   @Parameters
@@ -82,9 +82,9 @@ public class TestEditLogTailer {
   static final long NN_LAG_TIMEOUT = 10 * 1000;
 
   static {
-    GenericTestUtils.setLogLevel(FSImage.LOG, Level.ALL);
-    GenericTestUtils.setLogLevel(FSEditLog.LOG, Level.ALL);
-    GenericTestUtils.setLogLevel(EditLogTailer.LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(FSImage.LOG, Level.DEBUG);
+    GenericTestUtils.setLogLevel(FSEditLog.LOG, org.slf4j.event.Level.DEBUG);
+    GenericTestUtils.setLogLevel(EditLogTailer.LOG, Level.DEBUG);
   }
 
   private static Configuration getConf() {
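The test changes also swap org.apache.log4j.Level for org.slf4j.event.Level, whose enum only spans ERROR through TRACE; there is no ALL, so these tests use DEBUG as the closest equivalent. A sketch of the pattern, assuming the GenericTestUtils.setLogLevel(Logger, Level) overload that the diffs above rely on:

import org.apache.hadoop.test.GenericTestUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.event.Level;

class LogLevelSetup {
  static final Logger LOG = LoggerFactory.getLogger(LogLevelSetup.class);

  static {
    // org.slf4j.event.Level defines ERROR, WARN, INFO, DEBUG, TRACE only;
    // log4j's ALL has no direct counterpart, so DEBUG (or TRACE) is used.
    GenericTestUtils.setLogLevel(LOG, Level.DEBUG);
  }
}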