HDFS-2200. Change FSNamesystem.LOG to package private.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1151344 13f79535-47bb-0310-9956-ffa450edef68
commit 0b12cc822d
parent 969a263188
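The whole commit follows one refactoring: the block-management classes stop borrowing FSNamesystem.LOG and log through a logger they own (BlockManager.LOG, BlockPlacementPolicy.LOG), which is what lets the FSNamesystem.LOG field drop from public to package private. A minimal sketch of the idiom with commons-logging, using a hypothetical class name rather than the actual Hadoop source:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    class BlockSubsystem {
      // The class owns a logger named after itself ...
      static final Log LOG = LogFactory.getLog(BlockSubsystem.class);

      void logConfig(short defaultReplication) {
        // ... so call sites shrink from FSNamesystem.LOG.info(...) to LOG.info(...)
        LOG.info("defaultReplication = " + defaultReplication);
      }
    }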
@@ -607,6 +607,8 @@ Trunk (unreleased changes)
     HDFS-2191. Move datanodeMap from FSNamesystem to DatanodeManager.
     (szetszwo)
 
+    HDFS-2200. Change FSNamesystem.LOG to package private. (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
@@ -212,12 +212,12 @@ public class BlockManager {
     this.replicationRecheckInterval =
       conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
                   DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000L;
-    FSNamesystem.LOG.info("defaultReplication = " + defaultReplication);
-    FSNamesystem.LOG.info("maxReplication = " + maxReplication);
-    FSNamesystem.LOG.info("minReplication = " + minReplication);
-    FSNamesystem.LOG.info("maxReplicationStreams = " + maxReplicationStreams);
-    FSNamesystem.LOG.info("shouldCheckForEnoughRacks = " + shouldCheckForEnoughRacks);
-    FSNamesystem.LOG.info("replicationRecheckInterval = " + replicationRecheckInterval);
+    LOG.info("defaultReplication = " + defaultReplication);
+    LOG.info("maxReplication = " + maxReplication);
+    LOG.info("minReplication = " + minReplication);
+    LOG.info("maxReplicationStreams = " + maxReplicationStreams);
+    LOG.info("shouldCheckForEnoughRacks = " + shouldCheckForEnoughRacks);
+    LOG.info("replicationRecheckInterval = " + replicationRecheckInterval);
   }
 
   public void activate(Configuration conf) {
@@ -344,9 +344,7 @@ public class BlockManager {
         namesystem.dir.updateSpaceConsumed(path, 0, -diff
             * fileINode.getReplication());
       } catch (IOException e) {
-        FSNamesystem.LOG
-            .warn("Unexpected exception while updating disk space : "
-                + e.getMessage());
+        LOG.warn("Unexpected exception while updating disk space.", e);
       }
     }
   }
@@ -517,7 +515,7 @@ public class BlockManager {
     final int numCorruptNodes = countNodes(blk).corruptReplicas();
     final int numCorruptReplicas = corruptReplicas.numCorruptReplicas(blk);
     if (numCorruptNodes != numCorruptReplicas) {
-      FSNamesystem.LOG.warn("Inconsistent number of corrupt replicas for "
+      LOG.warn("Inconsistent number of corrupt replicas for "
           + blk + " blockMap has " + numCorruptNodes
           + " but corrupt replicas map has " + numCorruptReplicas);
     }
@@ -915,7 +913,7 @@ public class BlockManager {
           Block block = neededReplicationsIterator.next();
           int priority = neededReplicationsIterator.getPriority();
           if (priority < 0 || priority >= blocksToReplicate.size()) {
-            FSNamesystem.LOG.warn("Unexpected replication priority: "
+            LOG.warn("Unexpected replication priority: "
                 + priority + " " + block);
           } else {
             blocksToReplicate.get(priority).add(block);
@@ -1384,8 +1382,8 @@ public class BlockManager {
       Collection<BlockInfo> toCorrupt,
       Collection<StatefulBlockInfo> toUC) {
 
-    if(FSNamesystem.LOG.isDebugEnabled()) {
-      FSNamesystem.LOG.debug("Reported block " + block
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("Reported block " + block
           + " on " + dn.getName() + " size " + block.getNumBytes()
           + " replicaState = " + reportedState);
     }
@@ -1401,8 +1399,8 @@ public class BlockManager {
     BlockUCState ucState = storedBlock.getBlockUCState();
 
     // Block is on the NN
-    if(FSNamesystem.LOG.isDebugEnabled()) {
-      FSNamesystem.LOG.debug("In memory blockUCState = " + ucState);
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("In memory blockUCState = " + ucState);
     }
 
     // Ignore replicas already scheduled to be removed from the DN
@@ -1457,7 +1455,7 @@ public class BlockManager {
     case RUR:       // should not be reported
     case TEMPORARY: // should not be reported
     default:
-      FSNamesystem.LOG.warn("Unexpected replica state " + reportedState
+      LOG.warn("Unexpected replica state " + reportedState
           + " for block: " + storedBlock +
          " on " + dn.getName() + " size " + storedBlock.getNumBytes());
       return true;
@@ -1625,7 +1623,7 @@ public class BlockManager {
     int corruptReplicasCount = corruptReplicas.numCorruptReplicas(storedBlock);
     int numCorruptNodes = num.corruptReplicas();
     if (numCorruptNodes != corruptReplicasCount) {
-      FSNamesystem.LOG.warn("Inconsistent number of corrupt replicas for " +
+      LOG.warn("Inconsistent number of corrupt replicas for " +
           storedBlock + "blockMap has " + numCorruptNodes +
           " but corrupt replicas map has " + corruptReplicasCount);
     }
@@ -1708,10 +1706,10 @@ public class BlockManager {
     } finally {
       namesystem.writeUnlock();
     }
-    FSNamesystem.LOG.info("Total number of blocks = " + blocksMap.size());
-    FSNamesystem.LOG.info("Number of invalid blocks = " + nrInvalid);
-    FSNamesystem.LOG.info("Number of under-replicated blocks = " + nrUnderReplicated);
-    FSNamesystem.LOG.info("Number of over-replicated blocks = " + nrOverReplicated);
+    LOG.info("Total number of blocks = " + blocksMap.size());
+    LOG.info("Number of invalid blocks = " + nrInvalid);
+    LOG.info("Number of under-replicated blocks = " + nrUnderReplicated);
+    LOG.info("Number of over-replicated blocks = " + nrOverReplicated);
   }
 
   /**
@@ -1955,7 +1953,7 @@ public class BlockManager {
         nodeList.append(node.name);
         nodeList.append(" ");
       }
-      FSNamesystem.LOG.info("Block: " + block + ", Expected Replicas: "
+      LOG.info("Block: " + block + ", Expected Replicas: "
           + curExpectedReplicas + ", live replicas: " + curReplicas
           + ", corrupt replicas: " + num.corruptReplicas()
           + ", decommissioned replicas: " + num.decommissionedReplicas()
@@ -22,6 +22,8 @@ import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -38,7 +40,8 @@ import org.apache.hadoop.util.ReflectionUtils;
  */
 @InterfaceAudience.Private
 public abstract class BlockPlacementPolicy {
+  static final Log LOG = LogFactory.getLog(BlockPlacementPolicy.class);
 
   @InterfaceAudience.Private
   public static class NotEnoughReplicasException extends Exception {
     private static final long serialVersionUID = 1L;
@@ -212,7 +212,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
       chooseRandom(numOfReplicas, NodeBase.ROOT, excludedNodes,
                    blocksize, maxNodesPerRack, results);
     } catch (NotEnoughReplicasException e) {
-      FSNamesystem.LOG.warn("Not able to place enough replicas, still in need of "
+      LOG.warn("Not able to place enough replicas, still in need of "
          + numOfReplicas + " to reach " + totalReplicasExpected + "\n"
          + e.getMessage());
     }
@@ -343,7 +343,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     int numOfAvailableNodes =
         clusterMap.countNumOfAvailableNodes(nodes, excludedNodes.keySet());
     StringBuilder builder = null;
-    if (FSNamesystem.LOG.isDebugEnabled()) {
+    if (LOG.isDebugEnabled()) {
       builder = threadLocalBuilder.get();
       builder.setLength(0);
       builder.append("[");
@@ -366,7 +366,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     }
 
     String detail = enableDebugLogging;
-    if (FSNamesystem.LOG.isDebugEnabled()) {
+    if (LOG.isDebugEnabled()) {
       if (badTarget && builder != null) {
         detail = builder.append("]").toString();
         builder.setLength(0);
@@ -388,7 +388,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     int numOfAvailableNodes =
         clusterMap.countNumOfAvailableNodes(nodes, excludedNodes.keySet());
     StringBuilder builder = null;
-    if (FSNamesystem.LOG.isDebugEnabled()) {
+    if (LOG.isDebugEnabled()) {
       builder = threadLocalBuilder.get();
       builder.setLength(0);
       builder.append("[");
@@ -412,7 +412,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
 
     if (numOfReplicas>0) {
       String detail = enableDebugLogging;
-      if (FSNamesystem.LOG.isDebugEnabled()) {
+      if (LOG.isDebugEnabled()) {
         if (badTarget && builder != null) {
           detail = builder.append("]").toString();
           builder.setLength(0);
@@ -439,7 +439,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
                                List<DatanodeDescriptor> results) {
     // check if the node is (being) decommissed
     if (node.isDecommissionInProgress() || node.isDecommissioned()) {
-      if(FSNamesystem.LOG.isDebugEnabled()) {
+      if(LOG.isDebugEnabled()) {
         threadLocalBuilder.get().append(node.toString()).append(": ")
           .append("Node ").append(NodeBase.getPath(node))
           .append(" is not chosen because the node is (being) decommissioned ");
@@ -451,7 +451,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
                      (node.getBlocksScheduled() * blockSize);
     // check the remaining capacity of the target machine
     if (blockSize* FSConstants.MIN_BLOCKS_FOR_WRITE>remaining) {
-      if(FSNamesystem.LOG.isDebugEnabled()) {
+      if(LOG.isDebugEnabled()) {
         threadLocalBuilder.get().append(node.toString()).append(": ")
           .append("Node ").append(NodeBase.getPath(node))
           .append(" is not chosen because the node does not have enough space ");
@@ -467,7 +467,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
       avgLoad = (double)stats.getTotalLoad()/size;
     }
     if (node.getXceiverCount() > (2.0 * avgLoad)) {
-      if(FSNamesystem.LOG.isDebugEnabled()) {
+      if(LOG.isDebugEnabled()) {
         threadLocalBuilder.get().append(node.toString()).append(": ")
           .append("Node ").append(NodeBase.getPath(node))
           .append(" is not chosen because the node is too busy ");
@@ -487,7 +487,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
         }
       }
       if (counter>maxTargetPerLoc) {
-        if(FSNamesystem.LOG.isDebugEnabled()) {
+        if(LOG.isDebugEnabled()) {
           threadLocalBuilder.get().append(node.toString()).append(": ")
             .append("Node ").append(NodeBase.getPath(node))
             .append(" is not chosen because the rack has too many chosen nodes ");
@@ -19,16 +19,22 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.io.DataInput;
 import java.io.IOException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Queue;
+import java.util.Set;
+import java.util.TreeSet;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.DeprecatedUTF8;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.hdfs.DeprecatedUTF8;
 import org.apache.hadoop.io.WritableUtils;
 
 /**************************************************
@@ -326,7 +332,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   void addBlockToBeRecovered(BlockInfoUnderConstruction block) {
     if(recoverBlocks.contains(block)) {
       // this prevents adding the same block twice to the recovery queue
-      FSNamesystem.LOG.info("Block " + block +
+      BlockManager.LOG.info("Block " + block +
                             " is already in the recovery queue.");
       return;
     }
@@ -17,14 +17,18 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-
 import static org.apache.hadoop.hdfs.server.common.Util.now;
-import org.apache.hadoop.util.*;
-import java.io.*;
-import java.util.*;
-import java.sql.Time;
+
+import java.io.PrintWriter;
+import java.sql.Time;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.util.Daemon;
 
 /***************************************************
  * PendingReplicationBlocks does the bookkeeping of all
@@ -38,6 +42,8 @@ import java.sql.Time;
  *
  ***************************************************/
 class PendingReplicationBlocks {
+  private static final Log LOG = BlockManager.LOG;
+
   private Map<Block, PendingBlockInfo> pendingReplications;
   private ArrayList<Block> timedOutItems;
   Daemon timerThread = null;
@@ -87,9 +93,8 @@ class PendingReplicationBlocks {
     synchronized (pendingReplications) {
       PendingBlockInfo found = pendingReplications.get(block);
       if (found != null) {
-        if(FSNamesystem.LOG.isDebugEnabled()) {
-          FSNamesystem.LOG.debug("Removing pending replication for block" +
-                                 block);
+        if(LOG.isDebugEnabled()) {
+          LOG.debug("Removing pending replication for " + block);
         }
         found.decrementReplicas();
         if (found.getNumReplicas() <= 0) {
@@ -186,9 +191,8 @@ class PendingReplicationBlocks {
           pendingReplicationCheck();
           Thread.sleep(period);
         } catch (InterruptedException ie) {
-          if(FSNamesystem.LOG.isDebugEnabled()) {
-            FSNamesystem.LOG.debug(
-                "PendingReplicationMonitor thread received exception. " + ie);
+          if(LOG.isDebugEnabled()) {
+            LOG.debug("PendingReplicationMonitor thread is interrupted.", ie);
           }
         }
       }
@@ -202,8 +206,8 @@ class PendingReplicationBlocks {
       Iterator<Map.Entry<Block, PendingBlockInfo>> iter =
           pendingReplications.entrySet().iterator();
       long now = now();
-      if(FSNamesystem.LOG.isDebugEnabled()) {
-        FSNamesystem.LOG.debug("PendingReplicationMonitor checking Q");
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("PendingReplicationMonitor checking Q");
       }
       while (iter.hasNext()) {
         Map.Entry<Block, PendingBlockInfo> entry = iter.next();
@@ -213,8 +217,7 @@ class PendingReplicationBlocks {
           synchronized (timedOutItems) {
             timedOutItems.add(block);
           }
-          FSNamesystem.LOG.warn(
-              "PendingReplicationMonitor timed out block " + block);
+          LOG.warn("PendingReplicationMonitor timed out " + block);
           iter.remove();
         }
       }
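Note that PendingReplicationBlocks above does not get a logger of its own; it aliases the one owned by BlockManager (private static final Log LOG = BlockManager.LOG;), so helper output stays under the BlockManager logger name, and DatanodeDescriptor logs through BlockManager.LOG directly. A sketch of that design choice, with hypothetical names rather than the actual Hadoop source:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    class Owner {
      static final Log LOG = LogFactory.getLog(Owner.class);
    }

    class OwnerHelper {
      // Reuse the owner's logger: messages from the helper appear under
      // the "Owner" category instead of introducing a new logger name.
      private static final Log LOG = Owner.LOG;
    }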
@@ -162,7 +162,7 @@ import org.mortbay.util.ajax.JSON;
 @Metrics(context="dfs")
 public class FSNamesystem implements FSConstants, FSNamesystemMBean,
     FSClusterStats, NameNodeMXBean {
-  public static final Log LOG = LogFactory.getLog(FSNamesystem.class);
+  static final Log LOG = LogFactory.getLog(FSNamesystem.class);
 
   private static final ThreadLocal<StringBuilder> auditBuffer =
       new ThreadLocal<StringBuilder>() {
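With the field now package private, code outside org.apache.hadoop.hdfs.server.namenode (every test below) can no longer reference FSNamesystem.LOG and instead looks the logger up by class; commons-logging returns the same underlying logger the field wraps. A sketch of the test-side idiom, assuming log4j as the backing implementation, as these tests do:

    import org.apache.commons.logging.LogFactory;
    import org.apache.commons.logging.impl.Log4JLogger;
    import org.apache.log4j.Level;
    import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

    class LoggerSetup {
      static void enableNamenodeDebugLogging() {
        // getLog(FSNamesystem.class) resolves to the logger named
        // "org.apache.hadoop.hdfs.server.namenode.FSNamesystem", the same
        // one the package-private FSNamesystem.LOG field wraps.
        ((Log4JLogger) LogFactory.getLog(FSNamesystem.class))
            .getLogger().setLevel(Level.ALL);
      }
    }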
@@ -236,7 +236,7 @@ public class TestFiPipelines {
 
   private static void initLoggers() {
     ((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) TestFiPipelines.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
@@ -21,6 +21,7 @@ import java.io.IOException;
 
 import junit.framework.TestCase;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
@@ -44,7 +45,7 @@ public class TestDatanodeDeath extends TestCase {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)InterDatanodeProtocol.LOG).getLogger().setLevel(Level.ALL);
@@ -37,6 +37,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.log4j.Level;
 
@@ -49,7 +50,7 @@ public class TestFileAppend2 extends TestCase {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }
@@ -24,6 +24,7 @@ import junit.extensions.TestSetup;
 import junit.framework.Test;
 import junit.framework.TestSuite;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -46,7 +47,7 @@ public class TestFileAppend3 extends junit.framework.TestCase {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)InterDatanodeProtocol.LOG).getLogger().setLevel(Level.ALL);
@@ -70,7 +70,7 @@ public class TestFileAppend4 {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
@@ -54,7 +55,7 @@ public class TestFileConcurrentReader extends junit.framework.TestCase {
 
   {
     ((Log4JLogger) LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }
 
@@ -27,6 +27,7 @@ import java.util.ArrayList;
 import junit.framework.TestCase;
 
 import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
@@ -48,7 +49,7 @@ import org.apache.log4j.Level;
 public class TestFileCorruption extends TestCase {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
   }
@@ -25,6 +25,7 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.EnumSet;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
@@ -59,7 +60,7 @@ public class TestFileCreation extends junit.framework.TestCase {
   {
     //((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }
 
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -40,7 +41,7 @@ public class TestFileCreationClient extends junit.framework.TestCase {
   {
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)InterDatanodeProtocol.LOG).getLogger().setLevel(Level.ALL);
   }
 
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -33,7 +34,7 @@ public class TestFileCreationDelete extends junit.framework.TestCase {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
   }
 
   public void testFileCreationDeleteParent() throws IOException {
@@ -21,7 +21,7 @@ import java.util.ConcurrentModificationException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 
 /**
  * Test empty file creation.
@@ -40,7 +40,7 @@ public class TestFileCreationEmpty extends junit.framework.TestCase {
     Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
       public void uncaughtException(Thread t, Throwable e) {
         if (e instanceof ConcurrentModificationException) {
-          FSNamesystem.LOG.error("t=" + t, e);
+          LeaseManager.LOG.error("t=" + t, e);
           isConcurrentModificationException = true;
         }
       }
@@ -26,6 +26,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Random;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -51,7 +52,7 @@ import org.junit.Test;
  */
 public class TestFileStatus {
   {
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)FileSystem.LOG).getLogger().setLevel(Level.ALL);
   }
 
@@ -57,7 +57,7 @@ public class TestLeaseRecovery2 {
   {
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
   }
 
   static final private long BLOCK_SIZE = 1024;
@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicReference;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -53,7 +54,7 @@ public class TestMultiThreadedHflush {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)InterDatanodeProtocol.LOG).getLogger().setLevel(Level.ALL);
@@ -160,7 +160,7 @@ public class TestPipelines {
 
   private static void initLoggers() {
     ((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }
@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.security.PrivilegedExceptionAction;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -38,7 +39,7 @@ import org.junit.Test;
 /** Test reading from hdfs while a file is being written. */
 public class TestReadWhileWriting {
   {
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }
 
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -33,7 +34,7 @@ public class TestRenameWhileOpen extends junit.framework.TestCase {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
   }
 
   //TODO: un-comment checkFullFile once the lease recovery is done
@@ -25,6 +25,7 @@ import java.util.List;
 import java.util.Random;
 
 import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -56,7 +57,7 @@ public class TestBalancerWithMultipleNameNodes {
     ((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.OFF);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.OFF);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
 //    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.OFF);
   }
 
@@ -667,7 +667,7 @@ public class TestBlockReport {
 
   private static void initLoggers() {
     ((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) TestBlockReport.LOG).getLogger().setLevel(Level.ALL);
   }
@@ -84,7 +84,7 @@ public class TestBlockRecovery {
       new NamespaceInfo(1,CLUSTER_ID, POOL_ID, 2, 3);
 
   static {
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
   }
 