HDFS-9130. Use GenericTestUtils#setLogLevel to set the logging level. Contributed by Mingliang Liu.

Haohui Mai 2015-09-23 23:59:19 -07:00
parent a9aafad12b
commit 4893adff19
60 changed files with 151 additions and 178 deletions
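Because the rendered diff below omits +/- markers, each hunk simply pairs the removed line(s) with the replacement line(s) that follow. The change itself is mechanical; in shorthand, using names that appear throughout the diff:

    // before: reach through the commons-logging facade to the log4j logger
    ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);

    // after: one helper call, with no cast or Log4JLogger import at the call site
    GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);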

@@ -950,6 +950,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8733. Keep server related definition in hdfs.proto on server side.
(Mingliang Liu via wheat9)
HDFS-9130. Use GenericTestUtils#setLogLevel to set the logging level.
(Mingliang Liu via wheat9)
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

@@ -24,12 +24,12 @@ import java.util.concurrent.atomic.*;
import org.apache.log4j.Level;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.permission.*;
import org.apache.hadoop.hdfs.*;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
@@ -50,7 +50,7 @@ public class TestFuseDFS
private static final Log LOG = LogFactory.getLog(TestFuseDFS.class);
{
((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(LOG, Level.ALL);
}
/** Dump the given input stream to stderr */
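The bare { ... } block above is an instance initializer, so the level is reapplied every time JUnit instantiates the test class; other files in this patch use static { ... } blocks, which run once at class-load time. A minimal sketch of the two idioms:

    public class TestFuseDFS {
      private static final Log LOG = LogFactory.getLog(TestFuseDFS.class);

      // instance initializer: runs for each test instance JUnit creates
      { GenericTestUtils.setLogLevel(LOG, Level.ALL); }

      // static initializer, as used elsewhere in this patch: runs once
      // static { GenericTestUtils.setLogLevel(LOG, Level.ALL); }
    }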

@@ -24,7 +24,6 @@ import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fi.FiTestUtil;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -235,14 +234,14 @@ public class TestFiPipelines
}
private static void initLoggers() {
((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) TestFiPipelines.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) FiTestUtil.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) BlockReceiverAspects.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) DFSClientAspects.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.ALL);
GenericTestUtils.setLogLevel(LogFactory.getLog(FSNamesystem.class), Level.ALL);
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
GenericTestUtils.setLogLevel(TestFiPipelines.LOG, Level.ALL);
GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
GenericTestUtils.setLogLevel(FiTestUtil.LOG, Level.ALL);
GenericTestUtils.setLogLevel(BlockReceiverAspects.LOG, Level.ALL);
GenericTestUtils.setLogLevel(DFSClientAspects.LOG, Level.ALL);
}
}
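For reference, the helper centralizes the cast that each of these call sites used to repeat. A plausible sketch of GenericTestUtils#setLogLevel, assuming a log4j-backed Log as in these tests (the actual body in hadoop-common may differ):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.impl.Log4JLogger;
    import org.apache.log4j.Level;

    public abstract class GenericTestUtils {
      public static void setLogLevel(Log log, Level level) {
        // the one remaining cast; callers pass the plain commons-logging facade
        ((Log4JLogger) log).getLogger().setLevel(level);
      }
    }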

@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.datanode;
import java.io.IOException;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fi.DataTransferTestUtil;
import org.apache.hadoop.fi.DataTransferTestUtil.DataTransferTest;
@@ -40,6 +39,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
@@ -64,7 +64,7 @@ public class TestFiDataTransferProtocol {
}
{
((Log4JLogger)DataTransferProtocol.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(DataTransferProtocol.LOG, Level.ALL);
}
/**

@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
@@ -72,9 +73,9 @@ public class TestFiDataTransferProtocol2 {
}
{
((Log4JLogger) BlockReceiver.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)DataTransferProtocol.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(BlockReceiver.LOG, Level.ALL);
GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
GenericTestUtils.setLogLevel(DataTransferProtocol.LOG, Level.ALL);
}
/**
* 1. create files with dfs

@@ -29,9 +29,9 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import static org.apache.hadoop.fs.FileContextTestHelper.*;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.AfterClass;
@@ -101,7 +101,7 @@ public class TestFcHdfsSetUMask {
{
try {
((Log4JLogger)FileSystem.LOG).getLogger().setLevel(Level.DEBUG);
GenericTestUtils.setLogLevel(FileSystem.LOG, Level.DEBUG);
}
catch(Exception e) {
System.out.println("Cannot change log level\n"

@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.ChecksumFileSystem;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
@@ -173,7 +174,7 @@ public class BenchmarkThroughput extends Configured implements Tool {
// silence the minidfs cluster
Log hadoopLog = LogFactory.getLog("org");
if (hadoopLog instanceof Log4JLogger) {
((Log4JLogger) hadoopLog).getLogger().setLevel(Level.WARN);
GenericTestUtils.setLogLevel(hadoopLog, Level.WARN);
}
int reps = 1;
if (args.length == 1) {
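BenchmarkThroughput above keeps its instanceof guard because the "org" logger is looked up by name, with no guarantee it is log4j-backed; if the helper casts as sketched above, an unguarded call could fail with a ClassCastException under another logging binding. After the change, the guarded form reads:

    Log hadoopLog = LogFactory.getLog("org");
    if (hadoopLog instanceof Log4JLogger) {
      // safe: the underlying implementation is known to be log4j here
      GenericTestUtils.setLogLevel(hadoopLog, Level.WARN);
    }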

@@ -50,7 +50,6 @@ import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -930,7 +929,7 @@ public class TestDFSClientRetries {
public static void namenodeRestartTest(final Configuration conf,
final boolean isWebHDFS) throws Exception {
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
final List<Exception> exceptions = new ArrayList<Exception>();

@@ -43,7 +43,6 @@ import java.util.List;
import java.util.Random;
import java.util.Set;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -84,7 +83,7 @@ public class TestDistributedFileSystem {
private static final Random RAN = new Random();
static {
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
}
private boolean dualPortTesting = false;

@@ -26,8 +26,6 @@ import java.util.Arrays;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.ChecksumException;
@@ -38,6 +36,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
@@ -63,9 +62,9 @@ public class TestFileConcurrentReader {
Logger.getLogger(TestFileConcurrentReader.class);
{
((Log4JLogger) LeaseManager.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(LeaseManager.LOG, Level.ALL);
GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.ALL);
GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
}
static final long seed = 0xDEADBEEFL;
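Several hunks, including the one above, also swap LogFactory.getLog(FSNamesystem.class) for the class's own public LOG field. commons-logging keys loggers by class name, so both expressions resolve to the same underlying logger; the field form simply drops a lookup:

    // equivalent handles on the logger named
    // "org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
    GenericTestUtils.setLogLevel(LogFactory.getLog(FSNamesystem.class), Level.ALL);
    GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.ALL);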

@@ -52,8 +52,6 @@ import java.net.UnknownHostException;
import java.security.PrivilegedExceptionAction;
import java.util.EnumSet;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CreateFlag;
@@ -100,10 +98,9 @@ public class TestFileCreation {
static final String DIR = "/" + TestFileCreation.class.getSimpleName() + "/";
{
//((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(LeaseManager.LOG, Level.ALL);
GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.ALL);
GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
}
private static final String RPC_DETAILED_METRICS =
"RpcDetailedActivityForPort";

@@ -19,8 +19,6 @@ package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -31,6 +29,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Test;
@@ -41,10 +40,10 @@ public class TestFileCreationClient {
static final String DIR = "/" + TestFileCreationClient.class.getSimpleName() + "/";
{
((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)InterDatanodeProtocol.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
GenericTestUtils.setLogLevel(LeaseManager.LOG, Level.ALL);
GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.ALL);
GenericTestUtils.setLogLevel(InterDatanodeProtocol.LOG, Level.ALL);
}
/** Test lease recovery Triggered by DFSClient. */

@@ -26,8 +26,6 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
@@ -38,6 +36,7 @@ import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -48,8 +47,8 @@ import org.junit.Test;
*/
public class TestFileStatus {
{
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)FileSystem.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.ALL);
GenericTestUtils.setLogLevel(FileSystem.LOG, Level.ALL);
}
static final long seed = 0xDEADBEEFL;

@@ -25,7 +25,6 @@ import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.EnumSet;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -35,6 +34,7 @@ import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Test;
@@ -42,8 +42,8 @@ import org.junit.Test;
* newly introduced {@link FSDataOutputStream#hflush()} method */
public class TestHFlush {
{
((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
}
private final String fName = "hflushtest.dat";

@@ -39,11 +39,11 @@ import org.junit.Test;
public class TestLargeBlock {
/**
{
((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)TestLargeBlock.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
GenericTestUtils.setLogLevel(LeaseManager.LOG, Level.ALL);
GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.ALL);
GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
GenericTestUtils.setLogLevel(TestLargeBlock.LOG, Level.ALL);
}
*/
private static final Log LOG = LogFactory.getLog(TestLargeBlock.class);

@@ -28,7 +28,6 @@ import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -47,6 +46,7 @@ import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.Assert;
@@ -58,9 +58,9 @@ public class TestLeaseRecovery2 {
public static final Log LOG = LogFactory.getLog(TestLeaseRecovery2.class);
{
((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
GenericTestUtils.setLogLevel(LeaseManager.LOG, Level.ALL);
GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.ALL);
}
static final private long BLOCK_SIZE = 1024;

@@ -17,10 +17,10 @@
*/
package org.apache.hadoop.hdfs;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.TestListFiles;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -30,7 +30,7 @@ import org.junit.BeforeClass;
*/
public class TestListFilesInDFS extends TestListFiles {
{
((Log4JLogger)FileSystem.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(FileSystem.LOG, Level.ALL);
}

@@ -25,7 +25,6 @@ import java.io.IOException;
import java.util.EnumSet;
import java.util.Random;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -36,6 +35,7 @@ import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.AfterClass;
@@ -47,7 +47,7 @@ import org.junit.Test;
*/
public class TestListFilesInFileContext {
{
((Log4JLogger)FileSystem.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(FileSystem.LOG, Level.ALL);
}
static final long seed = 0xDEADBEEFL;

@@ -26,7 +26,6 @@ import java.io.File;
import java.io.IOException;
import java.util.Random;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -53,8 +52,8 @@ import org.junit.Test;
*/
public class TestPersistBlocks {
static {
((Log4JLogger)FSImage.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(FSImage.LOG, Level.ALL);
GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.ALL);
}
private static final int BLOCK_SIZE = 4096;

@@ -21,8 +21,6 @@ import java.io.IOException;
import java.io.OutputStream;
import java.security.PrivilegedExceptionAction;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -33,6 +31,7 @@ import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
@@ -40,8 +39,8 @@ import org.junit.Test;
/** Test reading from hdfs while a file is being written. */
public class TestReadWhileWriting {
{
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.ALL);
GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
}
private static final String DIR = "/"

@@ -37,7 +37,6 @@ import java.util.concurrent.ExecutorService;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
@@ -209,7 +208,7 @@ public class TestQJMWithFaults {
// If the user specifies a seed, then we should gather all the
// IPC trace information so that debugging is easier. This makes
// the test run about 25% slower otherwise.
((Log4JLogger)ProtobufRpcEngine.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(ProtobufRpcEngine.LOG, Level.ALL);
} else {
seed = new Random().nextLong();
}

@@ -41,7 +41,6 @@ import java.util.concurrent.ExecutorService;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
@@ -84,7 +83,7 @@ public class TestQuorumJournalManager {
private final List<QuorumJournalManager> toClose = Lists.newLinkedList();
static {
((Log4JLogger)ProtobufRpcEngine.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(ProtobufRpcEngine.LOG, Level.ALL);
}
@Before

@@ -27,7 +27,6 @@ import java.util.List;
import org.junit.Assert;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.qjournal.client.AsyncLogger;
import org.apache.hadoop.hdfs.qjournal.client.QuorumException;
@@ -56,7 +55,7 @@ import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.writeOp;
*/
public class TestQuorumJournalManagerUnit {
static {
((Log4JLogger)QuorumJournalManager.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(QuorumJournalManager.LOG, Level.ALL);
}
private static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(
12345, "mycluster", "my-bp", 0L);

@@ -26,7 +26,6 @@ import java.security.PrivilegedExceptionAction;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -44,6 +43,7 @@ import org.apache.hadoop.security.SaslRpcServer;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Test;
@@ -62,11 +62,11 @@ public class TestClientProtocolWithDelegationToken {
}
static {
((Log4JLogger) Client.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) Server.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) SaslRpcClient.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) SaslRpcServer.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) SaslInputStream.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(Client.LOG, Level.ALL);
GenericTestUtils.setLogLevel(Server.LOG, Level.ALL);
GenericTestUtils.setLogLevel(SaslRpcClient.LOG, Level.ALL);
GenericTestUtils.setLogLevel(SaslRpcServer.LOG, Level.ALL);
GenericTestUtils.setLogLevel(SaslInputStream.LOG, Level.ALL);
}
@Test

@@ -30,7 +30,6 @@ import java.net.URI;
import java.security.PrivilegedExceptionAction;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -53,6 +52,7 @@ import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Assert;
@@ -169,7 +169,7 @@ public class TestDelegationToken {
@Test
public void testDelegationTokenWebHdfsApi() throws Exception {
((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(NamenodeWebHdfsMethods.LOG, Level.ALL);
final String uri = WebHdfsConstants.WEBHDFS_SCHEME + "://"
+ config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
//get file system as JobTracker

@@ -36,7 +36,6 @@ import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
@@ -70,6 +69,7 @@ import org.apache.hadoop.security.SaslRpcServer;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.Assert;
@@ -89,11 +89,11 @@ public class TestBlockToken {
private static final String ADDRESS = "0.0.0.0";
static {
((Log4JLogger) Client.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) Server.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) SaslRpcClient.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) SaslRpcServer.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) SaslInputStream.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(Client.LOG, Level.ALL);
GenericTestUtils.setLogLevel(Server.LOG, Level.ALL);
GenericTestUtils.setLogLevel(SaslRpcClient.LOG, Level.ALL);
GenericTestUtils.setLogLevel(SaslRpcServer.LOG, Level.ALL);
GenericTestUtils.setLogLevel(SaslInputStream.LOG, Level.ALL);
}
/** Directory where we can count our open file descriptors under Linux */

@@ -51,7 +51,6 @@ import java.util.concurrent.TimeoutException;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -94,7 +93,7 @@ public class TestBalancer {
private static final Log LOG = LogFactory.getLog(TestBalancer.class);
static {
((Log4JLogger)Balancer.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(Balancer.LOG, Level.ALL);
}
final static long CAPACITY = 5000L;

@@ -31,7 +31,6 @@ import java.util.Set;
import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -50,6 +49,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.balancer.BalancerParameters;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
@@ -60,7 +60,7 @@ import org.junit.Test;
public class TestBalancerWithMultipleNameNodes {
static final Log LOG = Balancer.LOG;
{
((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(LOG, Level.ALL);
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
}

@@ -28,7 +28,6 @@ import java.util.EnumSet;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -63,6 +62,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
@@ -77,7 +77,7 @@ public class TestBlockTokenWithDFS {
private final byte[] rawData = new byte[FILE_SIZE];
{
((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
Random r = new Random();
r.nextBytes(rawData);
}

@@ -26,7 +26,6 @@ import java.util.ArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
@@ -40,14 +39,15 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Test;
public class TestBlocksWithNotEnoughRacks {
public static final Log LOG = LogFactory.getLog(TestBlocksWithNotEnoughRacks.class);
static {
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.ALL);
GenericTestUtils.setLogLevel(LOG, Level.ALL);
}
/*

@@ -37,7 +37,6 @@ import java.util.Map;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
@@ -61,6 +60,7 @@ import org.apache.hadoop.hdfs.server.namenode.Namesystem;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
@@ -72,7 +72,7 @@ import org.junit.rules.ExpectedException;
public class TestReplicationPolicy {
{
((Log4JLogger)BlockPlacementPolicy.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(BlockPlacementPolicy.LOG, Level.ALL);
}
private static final int BLOCK_SIZE = 1024;

@@ -33,7 +33,6 @@ import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -87,7 +86,7 @@ public class TestBPOfferService {
private long secondCallTime = 0;
static {
((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
}
private DatanodeProtocolClientSideTranslatorPB mockNN1;

@@ -44,7 +44,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -115,8 +114,8 @@ public class TestBlockRecovery {
BLOCK_ID, BLOCK_LEN, GEN_STAMP);
static {
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.ALL);
GenericTestUtils.setLogLevel(LOG, Level.ALL);
}
/**

@@ -30,7 +30,6 @@ import java.util.ArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.ReconfigurationException;
import org.apache.hadoop.fs.FileSystem;
@@ -45,6 +44,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Before;
@@ -57,7 +57,8 @@ public class TestDataNodeVolumeFailureReporting {
private static final Log LOG = LogFactory.getLog(TestDataNodeVolumeFailureReporting.class);
{
((Log4JLogger)TestDataNodeVolumeFailureReporting.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(TestDataNodeVolumeFailureReporting.LOG,
Level.ALL);
}
private FileSystem fs;

@@ -32,7 +32,6 @@ import java.util.concurrent.ThreadLocalRandom;
import com.google.common.base.Supplier;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
@@ -83,7 +82,7 @@ public class TestDatanodeProtocolRetryPolicy {
DFSTestUtil.getLocalDatanodeRegistration();
static {
((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(LOG, Level.ALL);
}
/**

@@ -22,7 +22,6 @@ import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClientAdapter;
@@ -38,6 +37,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
@@ -47,7 +47,7 @@ public class TestTransferRbw {
private static final Log LOG = LogFactory.getLog(TestTransferRbw.class);
{
((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
}
private static final Random RAN = new Random();

@@ -22,7 +22,6 @@ import com.google.common.base.Supplier;
import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import static org.hamcrest.core.Is.is;
@@ -93,8 +92,8 @@ public class TestSpaceReservation {
}
static {
((Log4JLogger) FsDatasetImpl.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(FsDatasetImpl.LOG, Level.ALL);
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
}
/**

@@ -28,7 +28,6 @@ import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -61,6 +60,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
@@ -74,12 +74,11 @@ import com.google.common.collect.Maps;
public class TestStorageMover {
static final Log LOG = LogFactory.getLog(TestStorageMover.class);
static {
((Log4JLogger)LogFactory.getLog(BlockPlacementPolicy.class)
).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(Dispatcher.class)
).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(DataTransferProtocol.class)).getLogger()
.setLevel(Level.ALL);
GenericTestUtils.setLogLevel(LogFactory.getLog(BlockPlacementPolicy.class),
Level.ALL);
GenericTestUtils.setLogLevel(LogFactory.getLog(Dispatcher.class),
Level.ALL);
GenericTestUtils.setLogLevel(DataTransferProtocol.LOG, Level.ALL);
}
private static final int BLOCK_SIZE = 1024;

@@ -22,11 +22,11 @@ import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.DefaultAuditLogger;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Rule;
import org.junit.Test;
@@ -60,7 +60,7 @@ public class TestAuditLogAtDebug {
Joiner.on(",").join(debugCommands.get()));
}
logger.initialize(conf);
((Log4JLogger) FSNamesystem.auditLog).getLogger().setLevel(level);
GenericTestUtils.setLogLevel(FSNamesystem.auditLog, level);
return spy(logger);
}

@@ -30,7 +30,6 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
@@ -62,8 +61,8 @@ public class TestBackupNode {
static {
((Log4JLogger)Checkpointer.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)BackupImage.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(Checkpointer.LOG, Level.ALL);
GenericTestUtils.setLogLevel(BackupImage.LOG, Level.ALL);
}
static final String BASE_DIR = MiniDFSCluster.getBaseDirectory();

@@ -48,7 +48,6 @@ import com.google.common.io.Files;
import org.apache.commons.cli.ParseException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -109,7 +108,7 @@ import com.google.common.primitives.Ints;
public class TestCheckpoint {
static {
((Log4JLogger)FSImage.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(FSImage.LOG, Level.ALL);
}
static final Log LOG = LogFactory.getLog(TestCheckpoint.class);

@@ -56,7 +56,6 @@ import java.util.regex.Pattern;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FileSystem;
@@ -102,7 +101,7 @@ import com.google.common.collect.Lists;
public class TestEditLog {
static {
((Log4JLogger)FSEditLog.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(FSEditLog.LOG, Level.ALL);
}
/**

@@ -32,7 +32,6 @@ import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -47,6 +46,7 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.Test;
@@ -59,7 +59,7 @@ import org.mockito.stubbing.Answer;
*/
public class TestEditLogRace {
static {
((Log4JLogger)FSEditLog.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(FSEditLog.LOG, Level.ALL);
}
private static final Log LOG = LogFactory.getLog(TestEditLogRace.class);

@@ -35,7 +35,6 @@ import java.util.HashMap;
import java.util.Map;
import java.util.SortedMap;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -48,6 +47,7 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.log4j.Level;
import org.junit.Test;
@@ -58,8 +58,8 @@ import com.google.common.io.Files;
public class TestFSEditLogLoader {
static {
((Log4JLogger)FSImage.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)FSEditLogLoader.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(FSImage.LOG, Level.ALL);
GenericTestUtils.setLogLevel(FSEditLogLoader.LOG, Level.ALL);
}
private static final File TEST_DIR = PathUtils.getTestDir(TestFSEditLogLoader.class);

@@ -27,7 +27,6 @@ import java.util.EnumSet;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
@@ -46,6 +45,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeat
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
import org.apache.hadoop.hdfs.util.Canceler;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Assert;
@@ -58,7 +58,7 @@ import org.junit.Test;
public class TestFSImageWithSnapshot {
{
SnapshotTestHelper.disableLogs();
((Log4JLogger)INode.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(INode.LOG, Level.ALL);
}
static final long seed = 0;

@@ -30,7 +30,6 @@ import java.util.EnumSet;
import java.util.Random;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CreateFlag;
@@ -43,6 +42,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.Assert;
@@ -52,7 +52,8 @@ import org.junit.Test;
public class TestFavoredNodesEndToEnd {
{
((Log4JLogger)LogFactory.getLog(BlockPlacementPolicy.class)).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(LogFactory.getLog(BlockPlacementPolicy.class),
Level.ALL);
}
private static MiniDFSCluster cluster;

@@ -92,6 +92,7 @@ import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
@@ -137,12 +138,12 @@ public class TestFsck {
throws Exception {
ByteArrayOutputStream bStream = new ByteArrayOutputStream();
PrintStream out = new PrintStream(bStream, true);
((Log4JLogger)FSPermissionChecker.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(FSPermissionChecker.LOG, Level.ALL);
int errCode = ToolRunner.run(new DFSck(conf, out), path);
if (checkErrorCode) {
assertEquals(expectedErrCode, errCode);
}
((Log4JLogger)FSPermissionChecker.LOG).getLogger().setLevel(Level.INFO);
GenericTestUtils.setLogLevel(FSPermissionChecker.LOG, Level.INFO);
FSImage.LOG.error("OUTPUT = " + bStream.toString());
return bStream.toString();
}
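Unlike most hunks, this one sets a level twice: ALL before running fsck so permission checking is traced, then INFO to restore the default. A failed assertion in between would leave the level raised for later tests; a try/finally variant (a hypothetical hardening, not part of this patch) would make the restore unconditional:

    GenericTestUtils.setLogLevel(FSPermissionChecker.LOG, Level.ALL);
    try {
      int errCode = ToolRunner.run(new DFSck(conf, out), path);
      if (checkErrorCode) {
        assertEquals(expectedErrCode, errCode);
      }
    } finally {
      // always drop back to the default level
      GenericTestUtils.setLogLevel(FSPermissionChecker.LOG, Level.INFO);
    }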

@@ -37,7 +37,6 @@ import java.util.concurrent.Future;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -80,7 +79,7 @@ import org.mockito.stubbing.Answer;
*/
public class TestSaveNamespace {
static {
((Log4JLogger)FSImage.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(FSImage.LOG, Level.ALL);
}
private static final Log LOG = LogFactory.getLog(TestSaveNamespace.class);

@@ -20,8 +20,6 @@ package org.apache.hadoop.hdfs.server.namenode.ha;
import java.io.IOException;
import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -29,6 +27,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.io.retry.RetryInvocationHandler;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread;
@@ -46,11 +45,9 @@ import com.google.common.base.Supplier;
*/
public class TestDNFencingWithReplication {
static {
((Log4JLogger)FSNamesystem.auditLog).getLogger().setLevel(Level.WARN);
((Log4JLogger)Server.LOG).getLogger().setLevel(Level.FATAL);
((Log4JLogger)LogFactory.getLog(
"org.apache.hadoop.io.retry.RetryInvocationHandler"))
.getLogger().setLevel(Level.FATAL);
GenericTestUtils.setLogLevel(FSNamesystem.auditLog, Level.WARN);
GenericTestUtils.setLogLevel(Server.LOG, Level.FATAL);
GenericTestUtils.setLogLevel(RetryInvocationHandler.LOG, Level.FATAL);
}
private static final int NUM_THREADS = 20;
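This hunk also retires a string-based lookup: the added RetryInvocationHandler import lets the block use the class's LOG field directly, so a renamed or repackaged class becomes a compile error rather than a silently unrelated logger. Both forms target the same logger:

    // name-based: the literal is never checked at compile time
    GenericTestUtils.setLogLevel(
        LogFactory.getLog("org.apache.hadoop.io.retry.RetryInvocationHandler"),
        Level.FATAL);

    // typed, as in the patch: follows the class if it moves
    GenericTestUtils.setLogLevel(RetryInvocationHandler.LOG, Level.FATAL);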

@@ -23,7 +23,6 @@ import java.io.File;
import java.io.IOException;
import java.net.URI;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
@@ -51,8 +50,8 @@ public class TestEditLogTailer {
static final long NN_LAG_TIMEOUT = 10 * 1000;
static {
((Log4JLogger)FSImage.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)EditLogTailer.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(FSImage.LOG, Level.ALL);
GenericTestUtils.setLogLevel(EditLogTailer.LOG, Level.ALL);
}
@Test

@@ -23,8 +23,6 @@ import static org.junit.Assert.assertTrue;
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -32,6 +30,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Level;
import org.junit.Test;
@@ -39,7 +38,7 @@ import org.junit.Test;
public class TestHAFsck {
static {
((Log4JLogger)LogFactory.getLog(DFSUtil.class)).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(DFSUtil.LOG, Level.ALL);
}
/**

@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.namenode.ha;
import com.google.common.util.concurrent.Uninterruptibles;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
@@ -74,7 +73,7 @@ public class TestHAStateTransitions {
RequestSource.REQUEST_BY_USER_FORCED);
static {
((Log4JLogger)EditLogTailer.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(EditLogTailer.LOG, Level.ALL);
}
/**

@@ -31,7 +31,6 @@ import com.google.common.collect.ImmutableList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
@@ -57,6 +56,7 @@ import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.MetricsAsserts;
import org.apache.log4j.Level;
import org.junit.After;
@@ -93,8 +93,8 @@ public class TestNameNodeMetrics {
"" + PERCENTILES_INTERVAL);
// Enable stale DataNodes checking
CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
((Log4JLogger)LogFactory.getLog(MetricsAsserts.class))
.getLogger().setLevel(Level.DEBUG);
GenericTestUtils.setLogLevel(LogFactory.getLog(MetricsAsserts.class),
Level.DEBUG);
}
private MiniDFSCluster cluster;

@@ -26,7 +26,6 @@ import java.util.EnumSet;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
@@ -48,6 +47,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Before;
@@ -58,7 +58,7 @@ import org.junit.Test;
*/
public class TestINodeFileUnderConstructionWithSnapshot {
{
((Log4JLogger)INode.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(INode.LOG, Level.ALL);
SnapshotTestHelper.disableLogs();
}

@@ -34,7 +34,6 @@ import java.util.HashMap;
import java.util.Random;
import org.apache.commons.io.output.NullOutputStream;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
@@ -74,7 +73,7 @@ import org.junit.rules.ExpectedException;
*/
public class TestSnapshot {
{
((Log4JLogger)INode.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(INode.LOG, Level.ALL);
SnapshotTestHelper.disableLogs();
}

@@ -28,7 +28,6 @@ import java.io.PrintStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAAdmin;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
@@ -37,6 +36,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Shell;
import org.apache.log4j.Level;
import org.junit.After;
@@ -52,7 +52,7 @@ import com.google.common.io.Files;
*/
public class TestDFSHAAdminMiniCluster {
static {
((Log4JLogger)LogFactory.getLog(HAAdmin.class)).getLogger().setLevel(
GenericTestUtils.setLogLevel(LogFactory.getLog(HAAdmin.class),
Level.ALL);
}
private static final Log LOG = LogFactory.getLog(TestDFSHAAdminMiniCluster.class);

@@ -22,7 +22,6 @@ import java.util.Collections;
import java.util.Comparator;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@@ -34,12 +33,12 @@ import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.util.ByteArrayManager.Counter;
import org.apache.hadoop.hdfs.util.ByteArrayManager.CounterMap;
import org.apache.hadoop.hdfs.util.ByteArrayManager.FixedLengthManager;
import org.apache.hadoop.hdfs.util.ByteArrayManager.ManagerMap;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.Assert;
@@ -50,8 +49,8 @@ import org.junit.Test;
*/
public class TestByteArrayManager {
static {
((Log4JLogger)LogFactory.getLog(ByteArrayManager.class)
).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(LogFactory.getLog(ByteArrayManager.class),
Level.ALL);
}
static final Log LOG = LogFactory.getLog(TestByteArrayManager.class);
@@ -560,8 +559,8 @@ public class TestByteArrayManager {
}
public static void main(String[] args) throws Exception {
((Log4JLogger)LogFactory.getLog(ByteArrayManager.class)
).getLogger().setLevel(Level.OFF);
GenericTestUtils.setLogLevel(LogFactory.getLog(ByteArrayManager.class),
Level.OFF);
final int arrayLength = 64 * 1024; //64k
final int nThreads = 512;
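Level.ALL and Level.OFF, both used in TestByteArrayManager above, are the two extremes of log4j's ordered levels; the tests use ALL to capture everything, while the benchmark main uses OFF to keep log I/O out of the timings:

    // log4j thresholds from lowest to highest; a logger emits events at or above its level:
    // ALL < TRACE < DEBUG < INFO < WARN < ERROR < FATAL < OFF
    GenericTestUtils.setLogLevel(LogFactory.getLog(ByteArrayManager.class), Level.OFF);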

@@ -27,7 +27,6 @@ import java.net.URI;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FSMainOperationsBaseTest;
@@ -44,6 +43,7 @@ import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.Assert;
@@ -52,7 +52,7 @@ import org.junit.Test;
public class TestFSMainOperationsWebHdfs extends FSMainOperationsBaseTest {
{
((Log4JLogger)ExceptionHandler.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(ExceptionHandler.LOG, Level.ALL);
}
private static MiniDFSCluster cluster = null;

@@ -36,7 +36,6 @@ import org.apache.commons.logging.Log;
import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -46,7 +45,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.TestDFSClientRetries;
@@ -236,7 +234,7 @@ public class TestWebHDFS {
/** Test client retry with namenode restarting. */
@Test(timeout=300000)
public void testNamenodeRestart() throws Exception {
((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(NamenodeWebHdfsMethods.LOG, Level.ALL);
final Configuration conf = WebHdfsTestUtil.createConf();
TestDFSClientRetries.namenodeRestartTest(conf, true);
}

@@ -21,7 +21,6 @@ import java.net.InetSocketAddress;
import java.net.URI;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -46,7 +45,7 @@ public class TestWebHdfsWithMultipleNameNodes {
static final Log LOG = WebHdfsTestUtil.LOG;
static private void setLogLevel() {
((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(LOG, Level.ALL);
GenericTestUtils.setLogLevel(NamenodeWebHdfsMethods.LOG, Level.ALL);
DFSTestUtil.setNameNodeLogLevel(Level.ALL);