HDFS-13849. Migrate logging to slf4j in hadoop-hdfs-httpfs, hadoop-hdfs-nfs, hadoop-hdfs-rbf, hadoop-hdfs-native-client. Contributed by Ian Pickering.

commit 7b1fa5693e
parent e8b063f630
Author: Giovanni Matteo Fumarola
Date:   2018-08-27 10:18:05 -07:00

12 changed files with 44 additions and 39 deletions
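Every file in this change follows the same mechanical pattern: the commons-logging Log/LogFactory pair is replaced by the slf4j Logger/LoggerFactory pair, with long declarations wrapped onto a second line. Below is a minimal sketch of the before/after shape; the class name ExampleService and the reportCleanup method are hypothetical, chosen only to illustrate the pattern seen in the hunks that follow.

// Before (commons-logging), as on the left side of each hunk:
//   import org.apache.commons.logging.Log;
//   import org.apache.commons.logging.LogFactory;
//   private static final Log LOG = LogFactory.getLog(ExampleService.class);

// After (slf4j):
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ExampleService {
  private static final Logger LOG =
      LoggerFactory.getLogger(ExampleService.class);

  void reportCleanup(long fileId) {
    // slf4j supports parameterized messages, as already used in OpenFileCtx.
    LOG.info("Clean up open file context for fileId: {}", fileId);
  }
}

A common motivation for this migration is that slf4j's {} placeholders defer message formatting until the log level is known to be enabled.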

File: TestFuseDFS.java

@@ -22,8 +22,8 @@ import java.util.ArrayList;
 import java.util.concurrent.atomic.*;
 import org.apache.log4j.Level;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.permission.*;
@@ -48,7 +48,8 @@ public class TestFuseDFS {
   private static Runtime r;
   private static String mountPoint;
-  private static final Log LOG = LogFactory.getLog(TestFuseDFS.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestFuseDFS.class);
   {
     GenericTestUtils.setLogLevel(LOG, Level.ALL);
   }

File: RpcProgramMountd.java

@@ -26,8 +26,8 @@ import java.util.Collections;
 import java.util.List;
 import java.util.HashMap;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
@@ -61,7 +61,8 @@ import com.google.common.annotations.VisibleForTesting;
  * RPC program corresponding to mountd daemon. See {@link Mountd}.
  */
 public class RpcProgramMountd extends RpcProgram implements MountInterface {
-  private static final Log LOG = LogFactory.getLog(RpcProgramMountd.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(RpcProgramMountd.class);
   public static final int PROGRAM = 100005;
   public static final int VERSION_1 = 1;
   public static final int VERSION_2 = 2;

File: AsyncDataService.java

@@ -22,8 +22,8 @@ import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 /**
  * This class is a thread pool to easily schedule async data operations. Current
@@ -31,7 +31,7 @@ import org.apache.commons.logging.LogFactory;
  * for readahead operations too.
  */
 public class AsyncDataService {
-  static final Log LOG = LogFactory.getLog(AsyncDataService.class);
+  static final Logger LOG = LoggerFactory.getLogger(AsyncDataService.class);
   // ThreadPool core pool size
   private static final int CORE_THREADS_PER_VOLUME = 1;

File: OpenFileCtx.java

@@ -1211,11 +1211,11 @@ class OpenFileCtx {
       LOG.info("Clean up open file context for fileId: {}",
           latestAttr.getFileId());
-      cleanup();
+      cleanupWithLogger();
     }
   }
-  synchronized void cleanup() {
+  synchronized void cleanupWithLogger() {
     if (!activeState) {
       LOG.info("Current OpenFileCtx is already inactive, no need to cleanup.");
       return;

File: OpenFileCtxCache.java

@@ -22,8 +22,8 @@ import java.util.Iterator;
 import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentMap;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
@@ -39,7 +39,8 @@ import com.google.common.collect.Maps;
  * used to maintain the writing context for a single file.
  */
 class OpenFileCtxCache {
-  private static final Log LOG = LogFactory.getLog(OpenFileCtxCache.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OpenFileCtxCache.class);
   // Insert and delete with openFileMap are synced
   private final ConcurrentMap<FileHandle, OpenFileCtx> openFileMap = Maps
       .newConcurrentMap();
@@ -138,7 +139,7 @@ class OpenFileCtxCache {
     // Cleanup the old stream outside the lock
     if (toEvict != null) {
-      toEvict.cleanup();
+      toEvict.cleanupWithLogger();
     }
     return true;
   }
@@ -178,7 +179,7 @@ class OpenFileCtxCache {
     // Invoke the cleanup outside the lock
     for (OpenFileCtx ofc : ctxToRemove) {
-      ofc.cleanup();
+      ofc.cleanupWithLogger();
     }
   }
@@ -214,7 +215,7 @@ class OpenFileCtxCache {
     // Invoke the cleanup outside the lock
     for (OpenFileCtx ofc : cleanedContext) {
-      ofc.cleanup();
+      ofc.cleanupWithLogger();
     }
   }

File: PrivilegedNfsGatewayStarter.java

@@ -22,8 +22,8 @@ import java.net.SocketException;
 import org.apache.commons.daemon.Daemon;
 import org.apache.commons.daemon.DaemonContext;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
@@ -37,7 +37,8 @@ import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
  * Debian: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=594880
  */
 public class PrivilegedNfsGatewayStarter implements Daemon {
-  static final Log LOG = LogFactory.getLog(PrivilegedNfsGatewayStarter.class);
+  static final Logger LOG =
+      LoggerFactory.getLogger(PrivilegedNfsGatewayStarter.class);
   private String[] args = null;
   private DatagramSocket registrationSocket = null;
   private Nfs3 nfs3Server = null;

File: WriteCtx.java

@@ -22,8 +22,8 @@ import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.nio.ByteBuffer;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
@@ -37,7 +37,7 @@ import com.google.common.base.Preconditions;
  * xid and reply status.
  */
 class WriteCtx {
-  public static final Log LOG = LogFactory.getLog(WriteCtx.class);
+  public static final Logger LOG = LoggerFactory.getLogger(WriteCtx.class);
   /**
    * In memory write data has 3 states. ALLOW_DUMP: not sequential write, still

File: WriteManager.java

@@ -20,8 +20,8 @@ package org.apache.hadoop.hdfs.nfs.nfs3;
 import java.io.IOException;
 import java.util.EnumSet;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.hdfs.DFSClient;
@@ -51,7 +51,7 @@ import com.google.common.annotations.VisibleForTesting;
  * Manage the writes and responds asynchronously.
  */
 public class WriteManager {
-  public static final Log LOG = LogFactory.getLog(WriteManager.class);
+  public static final Logger LOG = LoggerFactory.getLogger(WriteManager.class);
   private final NfsConfiguration config;
   private final IdMappingServiceProvider iug;

File: TestMountd.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.hdfs.nfs;
 import java.io.IOException;
 import java.net.InetAddress;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
@@ -35,7 +35,7 @@ import static org.junit.Assert.assertTrue;
 public class TestMountd {
-  public static final Log LOG = LogFactory.getLog(TestMountd.class);
+  public static final Logger LOG = LoggerFactory.getLogger(TestMountd.class);
   @Test
   public void testStart() throws IOException {

File: TestOutOfOrderWrite.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.hdfs.nfs;
 import java.nio.ByteBuffer;
 import java.util.Arrays;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3Utils;
@@ -51,7 +51,8 @@ import org.jboss.netty.channel.Channels;
 import org.jboss.netty.channel.MessageEvent;
 public class TestOutOfOrderWrite {
-  public final static Log LOG = LogFactory.getLog(TestOutOfOrderWrite.class);
+  public final static Logger LOG =
+      LoggerFactory.getLogger(TestOutOfOrderWrite.class);
   static FileHandle handle = null;
   static Channel channel;
@@ -179,4 +180,4 @@ public class TestOutOfOrderWrite {
     // TODO: convert to Junit test, and validate result automatically
   }
 }

File: RouterPermissionChecker.java

@@ -21,8 +21,8 @@ import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
@@ -35,7 +35,8 @@ import org.apache.hadoop.security.UserGroupInformation;
  * Class that helps in checking permissions in Router-based federation.
  */
 public class RouterPermissionChecker extends FSPermissionChecker {
-  static final Log LOG = LogFactory.getLog(RouterPermissionChecker.class);
+  static final Logger LOG =
+      LoggerFactory.getLogger(RouterPermissionChecker.class);
   /** Mount table default permission. */
   public static final short MOUNT_TABLE_PERMISSION_DEFAULT = 00755;

File: RecordStore.java

@@ -19,8 +19,8 @@ package org.apache.hadoop.hdfs.server.federation.store;
 import java.lang.reflect.Constructor;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
@@ -36,7 +36,7 @@ import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
 @InterfaceStability.Evolving
 public abstract class RecordStore<R extends BaseRecord> {
-  private static final Log LOG = LogFactory.getLog(RecordStore.class);
+  private static final Logger LOG = LoggerFactory.getLogger(RecordStore.class);
   /** Class of the record stored in this State Store. */