HDFS-2200. Change FSNamesystem.LOG to package private.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1151344 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2011-07-27 05:59:50 +00:00
parent 969a263188
commit 0b12cc822d
27 changed files with 99 additions and 74 deletions
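The pattern running through the diff below: instead of reaching into FSNamesystem.LOG, each block-management class now logs through its own commons-logging logger. A minimal sketch of the declaration idiom (class body elided):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class BlockManager {
  // Package-private, like the new FSNamesystem.LOG below: classes in the
  // same package (e.g. PendingReplicationBlocks) may share it, but code
  // in other packages can no longer log through this category directly.
  static final Log LOG = LogFactory.getLog(BlockManager.class);
}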

View File

@@ -607,6 +607,8 @@ Trunk (unreleased changes)
     HDFS-2191. Move datanodeMap from FSNamesystem to DatanodeManager.
     (szetszwo)
+    HDFS-2200. Change FSNamesystem.LOG to package private. (szetszwo)
+
   OPTIMIZATIONS
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image

View File

@@ -212,12 +212,12 @@ public class BlockManager {
     this.replicationRecheckInterval =
       conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
                   DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000L;
-    FSNamesystem.LOG.info("defaultReplication = " + defaultReplication);
-    FSNamesystem.LOG.info("maxReplication = " + maxReplication);
-    FSNamesystem.LOG.info("minReplication = " + minReplication);
-    FSNamesystem.LOG.info("maxReplicationStreams = " + maxReplicationStreams);
-    FSNamesystem.LOG.info("shouldCheckForEnoughRacks = " + shouldCheckForEnoughRacks);
-    FSNamesystem.LOG.info("replicationRecheckInterval = " + replicationRecheckInterval);
+    LOG.info("defaultReplication = " + defaultReplication);
+    LOG.info("maxReplication = " + maxReplication);
+    LOG.info("minReplication = " + minReplication);
+    LOG.info("maxReplicationStreams = " + maxReplicationStreams);
+    LOG.info("shouldCheckForEnoughRacks = " + shouldCheckForEnoughRacks);
+    LOG.info("replicationRecheckInterval = " + replicationRecheckInterval);
   }

   public void activate(Configuration conf) {
@@ -344,9 +344,7 @@ public class BlockManager {
         namesystem.dir.updateSpaceConsumed(path, 0, -diff
             * fileINode.getReplication());
       } catch (IOException e) {
-        FSNamesystem.LOG
-            .warn("Unexpected exception while updating disk space : "
-                + e.getMessage());
+        LOG.warn("Unexpected exception while updating disk space.", e);
       }
     }
   }
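Besides switching loggers, the hunk above also fixes how the exception is reported: commons-logging's two-argument warn(Object, Throwable) records the full stack trace, where the old code appended only e.getMessage(). A small sketch of the difference (class and method names hypothetical):

import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

class DiskSpaceLoggingSketch {
  private static final Log LOG = LogFactory.getLog(DiskSpaceLoggingSketch.class);

  void update() {
    try {
      throw new IOException("quota update failed"); // stand-in for updateSpaceConsumed()
    } catch (IOException e) {
      // Old style: LOG.warn("... : " + e.getMessage()); -- stack trace lost.
      // New style: pass the Throwable so the log shows where it was thrown.
      LOG.warn("Unexpected exception while updating disk space.", e);
    }
  }
}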
@@ -517,7 +515,7 @@ public class BlockManager {
     final int numCorruptNodes = countNodes(blk).corruptReplicas();
     final int numCorruptReplicas = corruptReplicas.numCorruptReplicas(blk);
     if (numCorruptNodes != numCorruptReplicas) {
-      FSNamesystem.LOG.warn("Inconsistent number of corrupt replicas for "
+      LOG.warn("Inconsistent number of corrupt replicas for "
           + blk + " blockMap has " + numCorruptNodes
           + " but corrupt replicas map has " + numCorruptReplicas);
     }
@@ -915,7 +913,7 @@ public class BlockManager {
       Block block = neededReplicationsIterator.next();
       int priority = neededReplicationsIterator.getPriority();
       if (priority < 0 || priority >= blocksToReplicate.size()) {
-        FSNamesystem.LOG.warn("Unexpected replication priority: "
+        LOG.warn("Unexpected replication priority: "
            + priority + " " + block);
       } else {
         blocksToReplicate.get(priority).add(block);
@@ -1384,8 +1382,8 @@ public class BlockManager {
       Collection<BlockInfo> toCorrupt,
       Collection<StatefulBlockInfo> toUC) {
-    if(FSNamesystem.LOG.isDebugEnabled()) {
-      FSNamesystem.LOG.debug("Reported block " + block
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("Reported block " + block
           + " on " + dn.getName() + " size " + block.getNumBytes()
           + " replicaState = " + reportedState);
     }
@@ -1401,8 +1399,8 @@ public class BlockManager {
     BlockUCState ucState = storedBlock.getBlockUCState();
     // Block is on the NN
-    if(FSNamesystem.LOG.isDebugEnabled()) {
-      FSNamesystem.LOG.debug("In memory blockUCState = " + ucState);
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("In memory blockUCState = " + ucState);
     }
     // Ignore replicas already scheduled to be removed from the DN
@@ -1457,7 +1455,7 @@ public class BlockManager {
     case RUR:       // should not be reported
     case TEMPORARY: // should not be reported
     default:
-      FSNamesystem.LOG.warn("Unexpected replica state " + reportedState
+      LOG.warn("Unexpected replica state " + reportedState
           + " for block: " + storedBlock +
           " on " + dn.getName() + " size " + storedBlock.getNumBytes());
       return true;
@@ -1625,7 +1623,7 @@ public class BlockManager {
     int corruptReplicasCount = corruptReplicas.numCorruptReplicas(storedBlock);
     int numCorruptNodes = num.corruptReplicas();
     if (numCorruptNodes != corruptReplicasCount) {
-      FSNamesystem.LOG.warn("Inconsistent number of corrupt replicas for " +
+      LOG.warn("Inconsistent number of corrupt replicas for " +
          storedBlock + "blockMap has " + numCorruptNodes +
          " but corrupt replicas map has " + corruptReplicasCount);
     }
@@ -1708,10 +1706,10 @@ public class BlockManager {
     } finally {
       namesystem.writeUnlock();
     }
-    FSNamesystem.LOG.info("Total number of blocks = " + blocksMap.size());
-    FSNamesystem.LOG.info("Number of invalid blocks = " + nrInvalid);
-    FSNamesystem.LOG.info("Number of under-replicated blocks = " + nrUnderReplicated);
-    FSNamesystem.LOG.info("Number of over-replicated blocks = " + nrOverReplicated);
+    LOG.info("Total number of blocks = " + blocksMap.size());
+    LOG.info("Number of invalid blocks = " + nrInvalid);
+    LOG.info("Number of under-replicated blocks = " + nrUnderReplicated);
+    LOG.info("Number of over-replicated blocks = " + nrOverReplicated);
   }

   /**
@@ -1955,7 +1953,7 @@ public class BlockManager {
         nodeList.append(node.name);
         nodeList.append(" ");
       }
-      FSNamesystem.LOG.info("Block: " + block + ", Expected Replicas: "
+      LOG.info("Block: " + block + ", Expected Replicas: "
          + curExpectedReplicas + ", live replicas: " + curReplicas
          + ", corrupt replicas: " + num.corruptReplicas()
          + ", decommissioned replicas: " + num.decommissionedReplicas()

View File

@@ -22,6 +22,8 @@ import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;

+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -38,7 +40,8 @@ import org.apache.hadoop.util.ReflectionUtils;
  */
 @InterfaceAudience.Private
 public abstract class BlockPlacementPolicy {
+  static final Log LOG = LogFactory.getLog(BlockPlacementPolicy.class);

   @InterfaceAudience.Private
   public static class NotEnoughReplicasException extends Exception {
     private static final long serialVersionUID = 1L;

View File

@@ -212,7 +212,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
       chooseRandom(numOfReplicas, NodeBase.ROOT, excludedNodes,
           blocksize, maxNodesPerRack, results);
     } catch (NotEnoughReplicasException e) {
-      FSNamesystem.LOG.warn("Not able to place enough replicas, still in need of "
+      LOG.warn("Not able to place enough replicas, still in need of "
          + numOfReplicas + " to reach " + totalReplicasExpected + "\n"
          + e.getMessage());
     }
@@ -343,7 +343,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     int numOfAvailableNodes =
         clusterMap.countNumOfAvailableNodes(nodes, excludedNodes.keySet());
     StringBuilder builder = null;
-    if (FSNamesystem.LOG.isDebugEnabled()) {
+    if (LOG.isDebugEnabled()) {
       builder = threadLocalBuilder.get();
       builder.setLength(0);
       builder.append("[");
@@ -366,7 +366,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     }

     String detail = enableDebugLogging;
-    if (FSNamesystem.LOG.isDebugEnabled()) {
+    if (LOG.isDebugEnabled()) {
       if (badTarget && builder != null) {
         detail = builder.append("]").toString();
         builder.setLength(0);
@@ -388,7 +388,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     int numOfAvailableNodes =
         clusterMap.countNumOfAvailableNodes(nodes, excludedNodes.keySet());
     StringBuilder builder = null;
-    if (FSNamesystem.LOG.isDebugEnabled()) {
+    if (LOG.isDebugEnabled()) {
       builder = threadLocalBuilder.get();
       builder.setLength(0);
       builder.append("[");
@@ -412,7 +412,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     if (numOfReplicas>0) {
       String detail = enableDebugLogging;
-      if (FSNamesystem.LOG.isDebugEnabled()) {
+      if (LOG.isDebugEnabled()) {
         if (badTarget && builder != null) {
           detail = builder.append("]").toString();
           builder.setLength(0);
@@ -439,7 +439,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
       List<DatanodeDescriptor> results) {
     // check if the node is (being) decommissed
     if (node.isDecommissionInProgress() || node.isDecommissioned()) {
-      if(FSNamesystem.LOG.isDebugEnabled()) {
+      if(LOG.isDebugEnabled()) {
        threadLocalBuilder.get().append(node.toString()).append(": ")
          .append("Node ").append(NodeBase.getPath(node))
          .append(" is not chosen because the node is (being) decommissioned ");
@@ -451,7 +451,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
         (node.getBlocksScheduled() * blockSize);
     // check the remaining capacity of the target machine
     if (blockSize* FSConstants.MIN_BLOCKS_FOR_WRITE>remaining) {
-      if(FSNamesystem.LOG.isDebugEnabled()) {
+      if(LOG.isDebugEnabled()) {
        threadLocalBuilder.get().append(node.toString()).append(": ")
          .append("Node ").append(NodeBase.getPath(node))
          .append(" is not chosen because the node does not have enough space ");
@@ -467,7 +467,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
       avgLoad = (double)stats.getTotalLoad()/size;
     }
     if (node.getXceiverCount() > (2.0 * avgLoad)) {
-      if(FSNamesystem.LOG.isDebugEnabled()) {
+      if(LOG.isDebugEnabled()) {
        threadLocalBuilder.get().append(node.toString()).append(": ")
          .append("Node ").append(NodeBase.getPath(node))
          .append(" is not chosen because the node is too busy ");
@@ -487,7 +487,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
       }
     }
     if (counter>maxTargetPerLoc) {
-      if(FSNamesystem.LOG.isDebugEnabled()) {
+      if(LOG.isDebugEnabled()) {
        threadLocalBuilder.get().append(node.toString()).append(": ")
          .append("Node ").append(NodeBase.getPath(node))
          .append(" is not chosen because the rack has too many chosen nodes ");

View File

@@ -19,16 +19,22 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 import java.io.DataInput;
 import java.io.IOException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Queue;
+import java.util.Set;
+import java.util.TreeSet;

 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.DeprecatedUTF8;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.hdfs.DeprecatedUTF8;
 import org.apache.hadoop.io.WritableUtils;

 /**************************************************
@@ -326,7 +332,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   void addBlockToBeRecovered(BlockInfoUnderConstruction block) {
     if(recoverBlocks.contains(block)) {
       // this prevents adding the same block twice to the recovery queue
-      FSNamesystem.LOG.info("Block " + block +
+      BlockManager.LOG.info("Block " + block +
                             " is already in the recovery queue.");
       return;
     }

View File

@@ -17,14 +17,18 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;

-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import static org.apache.hadoop.hdfs.server.common.Util.now;
-import org.apache.hadoop.util.*;
-import java.io.*;
-import java.util.*;
+
+import java.io.PrintWriter;
 import java.sql.Time;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.util.Daemon;

 /***************************************************
  * PendingReplicationBlocks does the bookkeeping of all
@@ -38,6 +42,8 @@ import java.sql.Time;
  *
  ***************************************************/
 class PendingReplicationBlocks {
+  private static final Log LOG = BlockManager.LOG;
+
   private Map<Block, PendingBlockInfo> pendingReplications;
   private ArrayList<Block> timedOutItems;
   Daemon timerThread = null;
@@ -87,9 +93,8 @@ class PendingReplicationBlocks {
     synchronized (pendingReplications) {
       PendingBlockInfo found = pendingReplications.get(block);
       if (found != null) {
-        if(FSNamesystem.LOG.isDebugEnabled()) {
-          FSNamesystem.LOG.debug("Removing pending replication for block" +
-                                 block);
+        if(LOG.isDebugEnabled()) {
+          LOG.debug("Removing pending replication for " + block);
         }
         found.decrementReplicas();
         if (found.getNumReplicas() <= 0) {
@@ -186,9 +191,8 @@ class PendingReplicationBlocks {
           pendingReplicationCheck();
           Thread.sleep(period);
         } catch (InterruptedException ie) {
-          if(FSNamesystem.LOG.isDebugEnabled()) {
-            FSNamesystem.LOG.debug(
-                "PendingReplicationMonitor thread received exception. " + ie);
+          if(LOG.isDebugEnabled()) {
+            LOG.debug("PendingReplicationMonitor thread is interrupted.", ie);
           }
         }
       }
@@ -202,8 +206,8 @@ class PendingReplicationBlocks {
     Iterator<Map.Entry<Block, PendingBlockInfo>> iter =
                                 pendingReplications.entrySet().iterator();
     long now = now();
-    if(FSNamesystem.LOG.isDebugEnabled()) {
-      FSNamesystem.LOG.debug("PendingReplicationMonitor checking Q");
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("PendingReplicationMonitor checking Q");
     }
     while (iter.hasNext()) {
       Map.Entry<Block, PendingBlockInfo> entry = iter.next();
@@ -213,8 +217,7 @@ class PendingReplicationBlocks {
           synchronized (timedOutItems) {
             timedOutItems.add(block);
           }
-          FSNamesystem.LOG.warn(
-              "PendingReplicationMonitor timed out block " + block);
+          LOG.warn("PendingReplicationMonitor timed out " + block);
           iter.remove();
         }
       }
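PendingReplicationBlocks gets no logger of its own; it aliases the package-private BlockManager.LOG, so its messages stay in the block-manager log category. A runnable sketch of what such an alias does (class name hypothetical):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

class LoggerAliasSketch {
  // Aliasing copies the reference, not the name: messages logged through
  // ALIAS are attributed to the category SHARED was created with.
  static final Log SHARED = LogFactory.getLog(LoggerAliasSketch.class);
  private static final Log ALIAS = SHARED; // same object, same category

  public static void main(String[] args) {
    ALIAS.info("appears under the LoggerAliasSketch category");
  }
}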

View File

@@ -162,7 +162,7 @@ import org.mortbay.util.ajax.JSON;
 @Metrics(context="dfs")
 public class FSNamesystem implements FSConstants, FSNamesystemMBean,
     FSClusterStats, NameNodeMXBean {
-  public static final Log LOG = LogFactory.getLog(FSNamesystem.class);
+  static final Log LOG = LogFactory.getLog(FSNamesystem.class);

   private static final ThreadLocal<StringBuilder> auditBuffer =
     new ThreadLocal<StringBuilder>() {
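This one-word edit is the heart of the commit: dropping public makes LOG package-private, so only classes under org.apache.hadoop.hdfs.server.namenode can still reference the field, and every external use in the hunks above and below had to move to another logger. A sketch of the effect (caller class hypothetical):

package org.apache.hadoop.hdfs.server.namenode;

class SamePackageCaller {
  void log() {
    FSNamesystem.LOG.info("same package: still compiles");
  }
}

// From another package, e.g. org.apache.hadoop.hdfs.server.blockmanagement:
//   FSNamesystem.LOG.info("...");
// no longer compiles ("LOG is not public in FSNamesystem"), which is why
// the tests below look the logger up by class instead.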

View File

@@ -236,7 +236,7 @@ public class TestFiPipelines {
   private static void initLoggers() {
     ((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) TestFiPipelines.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
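Tests outside the package can still control FSNamesystem's log level: commons-logging registers loggers by class name, so LogFactory.getLog(FSNamesystem.class) returns the same underlying logger the field wraps, and the Log4JLogger cast exposes log4j's setLevel(). This same replacement is applied in every test hunk that follows. A compact sketch of the pattern (helper class hypothetical):

import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.log4j.Level;

class TestLogSetup {
  static void enableNamenodeDebugLogging() {
    // Look the logger up by class; no access to the LOG field is needed.
    ((Log4JLogger) LogFactory.getLog(FSNamesystem.class))
        .getLogger().setLevel(Level.ALL);
  }
}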

View File

@@ -21,6 +21,7 @@ import java.io.IOException;
 import junit.framework.TestCase;

+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
@@ -44,7 +45,7 @@ public class TestDatanodeDeath extends TestCase {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)InterDatanodeProtocol.LOG).getLogger().setLevel(Level.ALL);

View File

@@ -37,6 +37,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;

+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.log4j.Level;
@@ -49,7 +50,7 @@ public class TestFileAppend2 extends TestCase {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }

View File

@@ -24,6 +24,7 @@ import junit.extensions.TestSetup;
 import junit.framework.Test;
 import junit.framework.TestSuite;

+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -46,7 +47,7 @@ public class TestFileAppend3 extends junit.framework.TestCase {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)InterDatanodeProtocol.LOG).getLogger().setLevel(Level.ALL);

View File

@@ -70,7 +70,7 @@ public class TestFileAppend4 {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }

View File

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs;

+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
@@ -54,7 +55,7 @@ public class TestFileConcurrentReader extends junit.framework.TestCase {
   {
     ((Log4JLogger) LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }

View File

@@ -27,6 +27,7 @@ import java.util.ArrayList;
 import junit.framework.TestCase;

 import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
@@ -48,7 +49,7 @@ import org.apache.log4j.Level;
 public class TestFileCorruption extends TestCase {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
   }

View File

@@ -25,6 +25,7 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.EnumSet;

+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
@@ -59,7 +60,7 @@ public class TestFileCreation extends junit.framework.TestCase {
   {
     //((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }

View File

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs;

+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -40,7 +41,7 @@ public class TestFileCreationClient extends junit.framework.TestCase {
   {
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)InterDatanodeProtocol.LOG).getLogger().setLevel(Level.ALL);
   }

View File

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;

 import java.io.IOException;

+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -33,7 +34,7 @@ public class TestFileCreationDelete extends junit.framework.TestCase {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
   }

   public void testFileCreationDeleteParent() throws IOException {

View File

@@ -21,7 +21,7 @@ import java.util.ConcurrentModificationException;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.LeaseManager;

 /**
  * Test empty file creation.
@@ -40,7 +40,7 @@ public class TestFileCreationEmpty extends junit.framework.TestCase {
     Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
       public void uncaughtException(Thread t, Throwable e) {
         if (e instanceof ConcurrentModificationException) {
-          FSNamesystem.LOG.error("t=" + t, e);
+          LeaseManager.LOG.error("t=" + t, e);
           isConcurrentModificationException = true;
         }
       }

View File

@@ -26,6 +26,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Random;

+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -51,7 +52,7 @@ import org.junit.Test;
  */
 public class TestFileStatus {
   {
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)FileSystem.LOG).getLogger().setLevel(Level.ALL);
   }

View File

@@ -57,7 +57,7 @@ public class TestLeaseRecovery2 {
   {
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
   }

   static final private long BLOCK_SIZE = 1024;

View File

@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicReference;

+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -53,7 +54,7 @@ public class TestMultiThreadedHflush {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)InterDatanodeProtocol.LOG).getLogger().setLevel(Level.ALL);

View File

@@ -160,7 +160,7 @@ public class TestPipelines {
   private static void initLoggers() {
     ((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }

View File

@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.security.PrivilegedExceptionAction;

+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -38,7 +39,7 @@ import org.junit.Test;
 /** Test reading from hdfs while a file is being written. */
 public class TestReadWhileWriting {
   {
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }

View File

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;

 import java.io.IOException;

+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -33,7 +34,7 @@ public class TestRenameWhileOpen extends junit.framework.TestCase {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
   }

   //TODO: un-comment checkFullFile once the lease recovery is done

View File

@@ -25,6 +25,7 @@ import java.util.List;
 import java.util.Random;

 import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -56,7 +57,7 @@ public class TestBalancerWithMultipleNameNodes {
     ((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.OFF);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.OFF);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
 //    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.OFF);
   }

View File

@@ -667,7 +667,7 @@ public class TestBlockReport {
   private static void initLoggers() {
     ((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) TestBlockReport.LOG).getLogger().setLevel(Level.ALL);
   }

View File

@@ -84,7 +84,7 @@ public class TestBlockRecovery {
       new NamespaceInfo(1,CLUSTER_ID, POOL_ID, 2, 3);

   static {
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
   }