HDFS-7712. Switch blockStateChangeLog to use slf4j.

Andrew Wang 2015-02-03 15:01:16 -08:00
parent bd69fb2d44
commit 3ae38ec7df
30 changed files with 250 additions and 306 deletions
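
The patch applies one mechanical pattern throughout: string-concatenated log messages become slf4j parameterized messages, isDebugEnabled()/isInfoEnabled() guards around simple messages are dropped because slf4j only renders the message when the level is enabled, and fatal(...) calls become error(...) since slf4j has no FATAL level. A minimal standalone sketch of the before/after shape (the class and method below are illustrative, not part of the patch):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class BlockLogMigrationSketch {
  // slf4j logger, analogous to NameNode.blockStateChangeLog after this change.
  private static final Logger blockLog =
      LoggerFactory.getLogger("BlockStateChange");

  static void logRemoval(String block, String node) {
    // Before (commons-logging style): eager concatenation, usually wrapped in
    // an isDebugEnabled() guard to avoid building the string unnecessarily.
    if (blockLog.isDebugEnabled()) {
      blockLog.debug("BLOCK* removeStoredBlock: " + block + " from " + node);
    }

    // After (slf4j style): the {} placeholders are only filled in when DEBUG
    // is enabled, so the explicit guard is no longer needed.
    blockLog.debug("BLOCK* removeStoredBlock: {} from {}", block, node);
  }

  public static void main(String[] args) {
    logRemoval("blk_1073741825", "127.0.0.1:50010");
  }
}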

View File

@@ -569,6 +569,8 @@ Release 2.7.0 - UNRELEASED
     HDFS-7681. Change ReplicaInputStreams constructor to take InputStream(s)
     instead of FileDescriptor(s). (Joe Pallas via szetszwo)

+    HDFS-7712. Switch blockStateChangeLog to use slf4j. (wang)
+
   OPTIMIZATIONS

     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

View File

@@ -260,7 +260,7 @@ public class BlockInfoUnderConstruction extends BlockInfo {
       if (genStamp != r.getGenerationStamp()) {
         r.getExpectedStorageLocation().removeBlock(this);
         NameNode.blockStateChangeLog.info("BLOCK* Removing stale replica "
-            + "from location: " + r.getExpectedStorageLocation());
+            + "from location: {}", r.getExpectedStorageLocation());
       }
     }
   }
@@ -327,8 +327,8 @@ public class BlockInfoUnderConstruction extends BlockInfo {
     if (primary != null) {
       primary.getExpectedStorageLocation().getDatanodeDescriptor().addBlockToBeRecovered(this);
       primary.setChosenAsPrimary(true);
-      NameNode.blockStateChangeLog.info("BLOCK* " + this
-          + " recovery started, primary=" + primary);
+      NameNode.blockStateChangeLog.info(
+          "BLOCK* {} recovery started, primary={}", this, primary);
     }
   }

View File

@@ -97,8 +97,8 @@ import org.slf4j.LoggerFactory;
 @InterfaceAudience.Private
 public class BlockManager {

-  static final Logger LOG = LoggerFactory.getLogger(BlockManager.class);
-  public static final Log blockLog = NameNode.blockStateChangeLog;
+  public static final Logger LOG = LoggerFactory.getLogger(BlockManager.class);
+  public static final Logger blockLog = NameNode.blockStateChangeLog;

   private static final String QUEUE_REASON_CORRUPT_STATE =
       "it has the wrong state or generation stamp";
@@ -1003,8 +1003,8 @@ public class BlockManager {
       final long size) throws UnregisteredNodeException {
     final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
     if (node == null) {
-      blockLog.warn("BLOCK* getBlocks: "
-          + "Asking for blocks from an unrecorded node " + datanode);
+      blockLog.warn("BLOCK* getBlocks: Asking for blocks from an" +
+          " unrecorded node {}", datanode);
       throw new HadoopIllegalArgumentException(
           "Datanode " + datanode + " not found.");
     }
@@ -1093,8 +1093,7 @@ public class BlockManager {
       datanodes.append(node).append(" ");
     }
     if (datanodes.length() != 0) {
-      blockLog.info("BLOCK* addToInvalidates: " + b + " "
-          + datanodes);
+      blockLog.info("BLOCK* addToInvalidates: {} {}", b, datanodes.toString());
     }
   }
@@ -1127,8 +1126,7 @@ public class BlockManager {
       // ignore the request for now. This could happen when BlockScanner
       // thread of Datanode reports bad block before Block reports are sent
       // by the Datanode on startup
-      blockLog.info("BLOCK* findAndMarkBlockAsCorrupt: "
-          + blk + " not found");
+      blockLog.info("BLOCK* findAndMarkBlockAsCorrupt: {} not found", blk);
       return;
     }
@@ -1157,8 +1155,8 @@ public class BlockManager {
     BlockCollection bc = b.corrupted.getBlockCollection();
     if (bc == null) {
-      blockLog.info("BLOCK markBlockAsCorrupt: " + b
-          + " cannot be marked as corrupt as it does not belong to any file");
+      blockLog.info("BLOCK markBlockAsCorrupt: {} cannot be marked as" +
+          " corrupt as it does not belong to any file", b);
       addToInvalidates(b.corrupted, node);
       return;
     }
@@ -1205,7 +1203,7 @@ public class BlockManager {
    */
   private boolean invalidateBlock(BlockToMarkCorrupt b, DatanodeInfo dn
       ) throws IOException {
-    blockLog.info("BLOCK* invalidateBlock: " + b + " on " + dn);
+    blockLog.info("BLOCK* invalidateBlock: {} on {}", b, dn);
     DatanodeDescriptor node = getDatanodeManager().getDatanode(dn);
     if (node == null) {
       throw new IOException("Cannot invalidate " + b
@@ -1219,20 +1217,22 @@ public class BlockManager {
-          "invalidation of " + b + " on " + dn + " because " +
-          nr.replicasOnStaleNodes() + " replica(s) are located on nodes " +
-          "with potentially out-of-date block reports");
+      blockLog.info("BLOCK* invalidateBlocks: postponing " +
+          "invalidation of {} on {} because {} replica(s) are located on " +
+          "nodes with potentially out-of-date block reports", b, dn,
+          nr.replicasOnStaleNodes());
       postponeBlock(b.corrupted);
       return false;
     } else if (nr.liveReplicas() >= 1) {
       // If we have at least one copy on a live node, then we can delete it.
       addToInvalidates(b.corrupted, dn);
       removeStoredBlock(b.stored, node);
-      if(blockLog.isDebugEnabled()) {
-        blockLog.debug("BLOCK* invalidateBlocks: "
-            + b + " on " + dn + " listed for deletion.");
-      }
+      blockLog.debug("BLOCK* invalidateBlocks: {} on {} listed for deletion.",
+          b, dn);
       return true;
     } else {
-      blockLog.info("BLOCK* invalidateBlocks: " + b
-          + " on " + dn + " is the only copy and was not deleted");
+      blockLog.info("BLOCK* invalidateBlocks: {} on {} is the only copy and" +
+          " was not deleted", b, dn);
       return false;
     }
   }
@@ -1364,8 +1364,8 @@ public class BlockManager {
           if ( (pendingReplications.getNumReplicas(block) > 0) ||
                (blockHasEnoughRacks(block)) ) {
             neededReplications.remove(block, priority); // remove from neededReplications
-            blockLog.info("BLOCK* Removing " + block
-                + " from neededReplications as it has enough replicas");
+            blockLog.info("BLOCK* Removing {} from neededReplications as" +
+                " it has enough replicas", block);
             continue;
           }
         }
@@ -1434,8 +1434,8 @@ public class BlockManager {
               (blockHasEnoughRacks(block)) ) {
             neededReplications.remove(block, priority); // remove from neededReplications
             rw.targets = null;
-            blockLog.info("BLOCK* Removing " + block
-                + " from neededReplications as it has enough replicas");
+            blockLog.info("BLOCK* Removing {} from neededReplications as" +
+                " it has enough replicas", block);
             continue;
           }
         }
@@ -1459,11 +1459,8 @@ public class BlockManager {
           // replications that fail after an appropriate amount of time.
           pendingReplications.increment(block,
               DatanodeStorageInfo.toDatanodeDescriptors(targets));
-          if(blockLog.isDebugEnabled()) {
-            blockLog.debug(
-                "BLOCK* block " + block
-                + " is moved from neededReplications to pendingReplications");
-          }
+          blockLog.debug("BLOCK* block {} is moved from neededReplications to "
+              + "pendingReplications", block);

           // remove from neededReplications
           if(numEffectiveReplicas + targets.length >= requiredReplication) {
@@ -1485,16 +1482,13 @@ public class BlockManager {
             targetList.append(' ');
             targetList.append(targets[k].getDatanodeDescriptor());
           }
-          blockLog.info("BLOCK* ask " + rw.srcNode
-              + " to replicate " + rw.block + " to " + targetList);
+          blockLog.info("BLOCK* ask {} to replicate {} to {}", rw.srcNode,
+              rw.block, targetList);
         }
       }
     }
-    if(blockLog.isDebugEnabled()) {
-      blockLog.debug(
-          "BLOCK* neededReplications = " + neededReplications.size()
-          + " pendingReplications = " + pendingReplications.size());
-    }
+    blockLog.debug("BLOCK* neededReplications = {} pendingReplications = {}",
+        neededReplications.size(), pendingReplications.size());

     return scheduledWork;
   }
@@ -1792,8 +1786,8 @@ public class BlockManager {
     if (namesystem.isInStartupSafeMode()
         && storageInfo.getBlockReportCount() > 0) {
       blockLog.info("BLOCK* processReport: "
-          + "discarded non-initial block report from " + nodeID
-          + " because namenode still in startup phase");
+          + "discarded non-initial block report from {}"
+          + " because namenode still in startup phase", nodeID);
       return !node.hasStaleStorages();
     }
@@ -1813,9 +1807,8 @@ public class BlockManager {
     if (invalidatedBlocks != null) {
       for (Block b : invalidatedBlocks) {
-        blockLog.info("BLOCK* processReport: " + b + " on " + node
-            + " size " + b.getNumBytes()
-            + " does not belong to any file");
+        blockLog.info("BLOCK* processReport: {} on node {} size {} does not " +
+            "belong to any file", b, node, b.getNumBytes());
       }
     }
@@ -1824,10 +1817,10 @@ public class BlockManager {
     if (metrics != null) {
       metrics.addBlockReport((int) (endTime - startTime));
     }
-    blockLog.info("BLOCK* processReport: from storage " + storage.getStorageID()
-        + " node " + nodeID + ", blocks: " + newReport.getNumberOfBlocks()
-        + ", hasStaleStorages: " + node.hasStaleStorages()
-        + ", processing time: " + (endTime - startTime) + " msecs");
+    blockLog.info("BLOCK* processReport: from storage {} node {}, " +
+        "blocks: {}, hasStaleStorage: {}, processing time: {} msecs", storage
+        .getStorageID(), nodeID, newReport.getNumberOfBlocks(),
+        node.hasStaleStorages(), (endTime - startTime));
     return !node.hasStaleStorages();
   }
@@ -1934,8 +1927,8 @@ public class BlockManager {
       numBlocksLogged++;
     }
     if (numBlocksLogged > maxNumBlocksToLog) {
-      blockLog.info("BLOCK* processReport: logged info for " + maxNumBlocksToLog
-          + " of " + numBlocksLogged + " reported.");
+      blockLog.info("BLOCK* processReport: logged info for {} of {} " +
+          "reported.", maxNumBlocksToLog, numBlocksLogged);
     }
     for (Block b : toInvalidate) {
       addToInvalidates(b, node);
@@ -2414,9 +2407,9 @@ public class BlockManager {
     }
     if (storedBlock == null || storedBlock.getBlockCollection() == null) {
       // If this block does not belong to anyfile, then we are done.
-      blockLog.info("BLOCK* addStoredBlock: " + block + " on "
-          + node + " size " + block.getNumBytes()
-          + " but it does not belong to any file");
+      blockLog.info("BLOCK* addStoredBlock: {} on {} size {} but it does not" +
+          " belong to any file", block, node, block.getNumBytes());
       // we could add this block to invalidate set of this datanode.
       // it will happen in next block report otherwise.
       return block;
@@ -2438,6 +2431,8 @@ public class BlockManager {
-      blockLog.warn("BLOCK* addStoredBlock: " + "block " + storedBlock
-          + " moved to storageType " + storageInfo.getStorageType()
-          + " on node " + node);
+      blockLog.warn("BLOCK* addStoredBlock: block {} moved to storageType " +
+          "{} on node {}", storedBlock, storageInfo.getStorageType(), node);
     } else {
       // if the same block is added again and the replica was corrupt
       // previously because of a wrong gen stamp, remove it from the
@@ -2445,9 +2440,9 @@ public class BlockManager {
       corruptReplicas.removeFromCorruptReplicasMap(block, node,
           Reason.GENSTAMP_MISMATCH);
       curReplicaDelta = 0;
-      blockLog.warn("BLOCK* addStoredBlock: "
-          + "Redundant addStoredBlock request received for " + storedBlock
-          + " on " + node + " size " + storedBlock.getNumBytes());
+      blockLog.warn("BLOCK* addStoredBlock: Redundant addStoredBlock request"
+          + " received for {} on node {} size {}", storedBlock, node,
+          storedBlock.getNumBytes());
     }

     // Now check for completion of blocks and safe block count
@@ -2515,7 +2510,7 @@ public class BlockManager {
     storedBlock.appendStringTo(sb);
     sb.append(" size " )
       .append(storedBlock.getNumBytes());
-    blockLog.info(sb);
+    blockLog.info(sb.toString());
   }

   /**
    * Invalidate corrupt replicas.
@@ -2545,8 +2540,8 @@ public class BlockManager {
           removedFromBlocksMap = false;
         }
       } catch (IOException e) {
-        blockLog.info("invalidateCorruptReplicas "
-            + "error in deleting bad block " + blk + " on " + node, e);
+        blockLog.info("invalidateCorruptReplicas error in deleting bad block"
+            + " {} on {}", blk, node, e);
         removedFromBlocksMap = false;
       }
     }
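
The rewritten call in the hunk above passes three arguments for two {} placeholders. That relies on slf4j's handling of a trailing Throwable: when the last argument is an exception left over after placeholder substitution, it is logged together with its stack trace rather than formatted into the message. A small illustrative sketch of that behaviour (block and node names here are made up):

import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TrailingThrowableSketch {
  private static final Logger blockLog =
      LoggerFactory.getLogger("BlockStateChange");

  public static void main(String[] args) {
    IOException e = new IOException("disk failed");
    // Two placeholders, three arguments: {} is filled with "blk_1" and "dn-1",
    // and the trailing exception is logged with its stack trace.
    blockLog.info("invalidateCorruptReplicas error in deleting bad block"
        + " {} on {}", "blk_1", "dn-1", e);
  }
}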
@@ -2878,7 +2873,7 @@ public class BlockManager {
       //
       addToInvalidates(b, cur.getDatanodeDescriptor());
       blockLog.info("BLOCK* chooseExcessReplicates: "
-          +"("+cur+", "+b+") is added to invalidated blocks set");
+          +"({}, {}) is added to invalidated blocks set", cur, b);
     }
   }
@@ -2912,11 +2907,8 @@ public class BlockManager {
     }
     if (excessBlocks.add(block)) {
       excessBlocksCount.incrementAndGet();
-      if(blockLog.isDebugEnabled()) {
-        blockLog.debug("BLOCK* addToExcessReplicate:"
-            + " (" + dn + ", " + block
-            + ") is added to excessReplicateMap");
-      }
+      blockLog.debug("BLOCK* addToExcessReplicate: ({}, {}) is added to"
+          + " excessReplicateMap", dn, block);
     }
   }
@@ -2925,17 +2917,12 @@ public class BlockManager {
    * removed block is still valid.
    */
   public void removeStoredBlock(Block block, DatanodeDescriptor node) {
-    if(blockLog.isDebugEnabled()) {
-      blockLog.debug("BLOCK* removeStoredBlock: "
-          + block + " from " + node);
-    }
+    blockLog.debug("BLOCK* removeStoredBlock: {} from {}", block, node);
     assert (namesystem.hasWriteLock());
     {
       if (!blocksMap.removeNode(block, node)) {
-        if(blockLog.isDebugEnabled()) {
-          blockLog.debug("BLOCK* removeStoredBlock: "
-              + block + " has already been removed from node " + node);
-        }
+        blockLog.debug("BLOCK* removeStoredBlock: {} has already been" +
+            " removed from node {}", block, node);
         return;
       }
@@ -2960,10 +2947,8 @@ public class BlockManager {
       if (excessBlocks != null) {
         if (excessBlocks.remove(block)) {
           excessBlocksCount.decrementAndGet();
-          if(blockLog.isDebugEnabled()) {
-            blockLog.debug("BLOCK* removeStoredBlock: "
-                + block + " is removed from excessBlocks");
-          }
+          blockLog.debug("BLOCK* removeStoredBlock: {} is removed from " +
+              "excessBlocks", block);
           if (excessBlocks.size() == 0) {
             excessReplicateMap.remove(node.getDatanodeUuid());
           }
@@ -3016,8 +3001,8 @@ public class BlockManager {
     if (delHint != null && delHint.length() != 0) {
       delHintNode = datanodeManager.getDatanode(delHint);
       if (delHintNode == null) {
-        blockLog.warn("BLOCK* blockReceived: " + block
-            + " is expected to be removed from an unrecorded node " + delHint);
+        blockLog.warn("BLOCK* blockReceived: {} is expected to be removed " +
+            "from an unrecorded node {}", block, delHint);
       }
     }
@@ -3056,13 +3041,12 @@ public class BlockManager {
       numBlocksLogged++;
     }
     if (numBlocksLogged > maxNumBlocksToLog) {
-      blockLog.info("BLOCK* addBlock: logged info for " + maxNumBlocksToLog
-          + " of " + numBlocksLogged + " reported.");
+      blockLog.info("BLOCK* addBlock: logged info for {} of {} reported.",
+          maxNumBlocksToLog, numBlocksLogged);
     }
     for (Block b : toInvalidate) {
-      blockLog.info("BLOCK* addBlock: block "
-          + b + " on " + node + " size " + b.getNumBytes()
-          + " does not belong to any file");
+      blockLog.info("BLOCK* addBlock: block {} on node {} size {} does not " +
+          "belong to any file", b, node, b.getNumBytes());
       addToInvalidates(b, node);
     }
     for (BlockToMarkCorrupt b : toCorrupt) {
@@ -3085,10 +3069,8 @@ public class BlockManager {
     int receiving = 0;
     final DatanodeDescriptor node = datanodeManager.getDatanode(nodeID);
     if (node == null || !node.isAlive) {
-      blockLog
-          .warn("BLOCK* processIncrementalBlockReport"
-              + " is received from dead or unregistered node "
-              + nodeID);
+      blockLog.warn("BLOCK* processIncrementalBlockReport"
+          + " is received from dead or unregistered node {}", nodeID);
       throw new IOException(
           "Got incremental block report from unregistered or dead node");
     }
@@ -3127,17 +3109,12 @@ public class BlockManager {
         assert false : msg; // if assertions are enabled, throw.
         break;
       }
-      if (blockLog.isDebugEnabled()) {
-        blockLog.debug("BLOCK* block "
-            + (rdbi.getStatus()) + ": " + rdbi.getBlock()
-            + " is received from " + nodeID);
-      }
-    }
-    if (blockLog.isDebugEnabled()) {
-      blockLog.debug("*BLOCK* NameNode.processIncrementalBlockReport: " + "from "
-          + nodeID + " receiving: " + receiving + ", " + " received: " + received
-          + ", " + " deleted: " + deleted);
+      blockLog.debug("BLOCK* block {}: {} is received from {}",
+          rdbi.getStatus(), rdbi.getBlock(), nodeID);
     }
+    blockLog.debug("*BLOCK* NameNode.processIncrementalBlockReport: from "
+        + "{} receiving: {}, received: {}, deleted: {}", nodeID, receiving,
+        received, deleted);
   }
/**
@@ -3461,10 +3438,8 @@ public class BlockManager {
     } finally {
       namesystem.writeUnlock();
     }
-    if (blockLog.isInfoEnabled()) {
-      blockLog.info("BLOCK* " + getClass().getSimpleName()
-          + ": ask " + dn + " to delete " + toInvalidate);
-    }
+    blockLog.info("BLOCK* {}: ask {} to delete {}", getClass().getSimpleName(),
+        dn, toInvalidate);
     return toInvalidate.size();
   }

View File

@@ -73,18 +73,15 @@ public class CorruptReplicasMap{
     }
     if (!nodes.keySet().contains(dn)) {
-      NameNode.blockStateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
-          blk.getBlockName() +
-          " added as corrupt on " + dn +
-          " by " + Server.getRemoteIp() +
-          reasonText);
+      NameNode.blockStateChangeLog.info(
+          "BLOCK NameSystem.addToCorruptReplicasMap: {} added as corrupt on "
+              + "{} by {} {}", blk.getBlockName(), dn, Server.getRemoteIp(),
+          reasonText);
     } else {
-      NameNode.blockStateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
-          "duplicate requested for " +
-          blk.getBlockName() + " to add as corrupt " +
-          "on " + dn +
-          " by " + Server.getRemoteIp() +
-          reasonText);
+      NameNode.blockStateChangeLog.info(
+          "BLOCK NameSystem.addToCorruptReplicasMap: duplicate requested for" +
+          " {} to add as corrupt on {} by {} {}", blk.getBlockName(), dn,
+          Server.getRemoteIp(), reasonText);
     }
     // Add the node or update the reason.
     nodes.put(dn, reasonCode);

View File

@@ -491,7 +491,7 @@ public class DatanodeManager {
     if (!node.getXferAddr().equals(nodeID.getXferAddr())) {
       final UnregisteredNodeException e = new UnregisteredNodeException(
           nodeID, node);
-      NameNode.stateChangeLog.fatal("BLOCK* NameSystem.getDatanode: "
+      NameNode.stateChangeLog.error("BLOCK* NameSystem.getDatanode: "
           + e.getLocalizedMessage());
       throw e;
     }

View File

@@ -112,8 +112,8 @@ class InvalidateBlocks {
     if (set.add(block)) {
       numBlocks++;
       if (log) {
-        NameNode.blockStateChangeLog.info("BLOCK* " + getClass().getSimpleName()
-            + ": add " + block + " to " + datanode);
+        NameNode.blockStateChangeLog.info("BLOCK* {}: add {} to {}",
+            getClass().getSimpleName(), block, datanode);
       }
     }
   }

View File

@@ -195,15 +195,12 @@ class UnderReplicatedBlocks implements Iterable<Block> {
                expectedReplicas == 1) {
         corruptReplOneBlocks++;
       }
-      if(NameNode.blockStateChangeLog.isDebugEnabled()) {
-        NameNode.blockStateChangeLog.debug(
-            "BLOCK* NameSystem.UnderReplicationBlock.add:"
-            + block
-            + " has only " + curReplicas
-            + " replicas and need " + expectedReplicas
-            + " replicas so is added to neededReplications"
-            + " at priority level " + priLevel);
-      }
+      NameNode.blockStateChangeLog.debug(
+          "BLOCK* NameSystem.UnderReplicationBlock.add: {}"
+              + " has only {} replicas and need {} replicas so is added to" +
+              " neededReplications at priority level {}", block, curReplicas,
+          expectedReplicas, priLevel);
       return true;
     }
     return false;
@ -247,24 +244,18 @@ class UnderReplicatedBlocks implements Iterable<Block> {
boolean remove(Block block, int priLevel) {
if(priLevel >= 0 && priLevel < LEVEL
&& priorityQueues.get(priLevel).remove(block)) {
if(NameNode.blockStateChangeLog.isDebugEnabled()) {
NameNode.blockStateChangeLog.debug(
"BLOCK* NameSystem.UnderReplicationBlock.remove: "
+ "Removing block " + block
+ " from priority queue "+ priLevel);
}
NameNode.blockStateChangeLog.debug(
"BLOCK* NameSystem.UnderReplicationBlock.remove: Removing block {}" +
" from priority queue {}", block, priLevel);
return true;
} else {
// Try to remove the block from all queues if the block was
// not found in the queue for the given priority level.
for (int i = 0; i < LEVEL; i++) {
if (priorityQueues.get(i).remove(block)) {
if(NameNode.blockStateChangeLog.isDebugEnabled()) {
NameNode.blockStateChangeLog.debug(
"BLOCK* NameSystem.UnderReplicationBlock.remove: "
+ "Removing block " + block
+ " from priority queue "+ i);
}
NameNode.blockStateChangeLog.debug(
"BLOCK* NameSystem.UnderReplicationBlock.remove: Removing block" +
" {} from priority queue {}", block, priLevel);
return true;
}
}
@@ -310,15 +301,12 @@ class UnderReplicatedBlocks implements Iterable<Block> {
       remove(block, oldPri);
     }
     if(priorityQueues.get(curPri).add(block)) {
-      if(NameNode.blockStateChangeLog.isDebugEnabled()) {
-        NameNode.blockStateChangeLog.debug(
-            "BLOCK* NameSystem.UnderReplicationBlock.update:"
-            + block
-            + " has only "+ curReplicas
-            + " replicas and needs " + curExpectedReplicas
-            + " replicas so is added to neededReplications"
-            + " at priority level " + curPri);
-      }
+      NameNode.blockStateChangeLog.debug(
+          "BLOCK* NameSystem.UnderReplicationBlock.update: {} has only {} " +
+              "replicas and needs {} replicas so is added to " +
+              "neededReplications at priority level {}", block, curReplicas,
+          curExpectedReplicas, curPri);
     }
     if (oldPri != curPri || expectedReplicasDelta != 0) {
       // corruptReplOneBlocks could possibly change

View File

@@ -411,7 +411,7 @@ public class BackupNode extends NameNode {
       errorMsg = "Incompatible build versions: active name-node BV = "
           + nsInfo.getBuildVersion() + "; backup node BV = "
           + Storage.getBuildVersion();
-      LOG.fatal(errorMsg);
+      LOG.error(errorMsg);
       throw new IOException(errorMsg);
     }
     assert HdfsConstants.NAMENODE_LAYOUT_VERSION == nsInfo.getLayoutVersion() :

View File

@ -21,9 +21,6 @@ import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
@ -43,11 +40,21 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.ha.*;
import org.apache.hadoop.hdfs.server.namenode.ha.ActiveState;
import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby;
import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
import org.apache.hadoop.hdfs.server.namenode.ha.StandbyState;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgressMetrics;
import org.apache.hadoop.hdfs.server.protocol.*;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@ -58,7 +65,6 @@ import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
import org.apache.hadoop.tracing.SpanReceiverHost;
import org.apache.hadoop.tracing.TraceAdminProtocol;
@ -67,6 +73,9 @@ import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.JvmPauseMonitor;
import org.apache.hadoop.util.ServicePlugin;
import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.LogManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.management.ObjectName;
@ -84,7 +93,41 @@ import java.util.concurrent.atomic.AtomicBoolean;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_BIND_HOST_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_BIND_HOST_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PLUGINS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_BIND_HOST_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_STARTUP_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS;
import static org.apache.hadoop.util.ExitUtil.terminate;
import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
@@ -253,9 +296,12 @@ public class NameNode implements NameNodeStatusMXBean {
   }

   public static final int DEFAULT_PORT = 8020;
-  public static final Log LOG = LogFactory.getLog(NameNode.class.getName());
-  public static final Log stateChangeLog = LogFactory.getLog("org.apache.hadoop.hdfs.StateChange");
-  public static final Log blockStateChangeLog = LogFactory.getLog("BlockStateChange");
+  public static final Logger LOG =
+      LoggerFactory.getLogger(NameNode.class.getName());
+  public static final Logger stateChangeLog =
+      LoggerFactory.getLogger("org.apache.hadoop.hdfs.StateChange");
+  public static final Logger blockStateChangeLog =
+      LoggerFactory.getLogger("BlockStateChange");
   public static final HAState ACTIVE_STATE = new ActiveState();
   public static final HAState STANDBY_STATE = new StandbyState();
@@ -349,7 +395,7 @@ public class NameNode implements NameNodeStatusMXBean {
       return;
     }
-    LOG.info(FS_DEFAULT_NAME_KEY + " is " + nnAddr);
+    LOG.info("{} is {}", FS_DEFAULT_NAME_KEY, nnAddr);
     URI nnUri = URI.create(nnAddr);
     String nnHost = nnUri.getHost();
@@ -369,8 +415,8 @@ public class NameNode implements NameNodeStatusMXBean {
       clientNamenodeAddress = null;
       return;
     }
-    LOG.info("Clients are to use " + clientNamenodeAddress + " to access"
-        + " this namenode/service.");
+    LOG.info("Clients are to use {} to access"
+        + " this namenode/service.", clientNamenodeAddress );
   }

   /**
@@ -391,7 +437,7 @@ public class NameNode implements NameNodeStatusMXBean {
    */
   public static void setServiceAddress(Configuration conf,
                                            String address) {
-    LOG.info("Setting ADDRESS " + address);
+    LOG.info("Setting ADDRESS {}", address);
     conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, address);
   }
@@ -1009,7 +1055,7 @@ public class NameNode implements NameNodeStatusMXBean {
     initializeGenericKeys(conf, nsId, namenodeId);

     if (conf.get(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY) == null) {
-      LOG.fatal("No shared edits directory configured for namespace " +
+      LOG.error("No shared edits directory configured for namespace " +
           nsId + " namenode " + namenodeId);
       return false;
     }
@@ -1183,7 +1229,7 @@ public class NameNode implements NameNodeStatusMXBean {
         i++;
         if (i >= argsLen) {
           // if no cluster id specified, return null
-          LOG.fatal("Must specify a valid cluster ID after the "
+          LOG.error("Must specify a valid cluster ID after the "
               + StartupOption.CLUSTERID.getName() + " flag");
           return null;
         }
@@ -1193,7 +1239,7 @@ public class NameNode implements NameNodeStatusMXBean {
             clusterId.equalsIgnoreCase(StartupOption.FORCE.getName()) ||
             clusterId.equalsIgnoreCase(
                 StartupOption.NONINTERACTIVE.getName())) {
-          LOG.fatal("Must specify a valid cluster ID after the "
+          LOG.error("Must specify a valid cluster ID after the "
              + StartupOption.CLUSTERID.getName() + " flag");
           return null;
         }
@@ -1230,7 +1276,7 @@ public class NameNode implements NameNodeStatusMXBean {
             i += 2;
             startOpt.setClusterId(args[i]);
           } else {
-            LOG.fatal("Must specify a valid cluster ID after the "
+            LOG.error("Must specify a valid cluster ID after the "
                 + StartupOption.CLUSTERID.getName() + " flag");
             return null;
           }
@@ -1244,7 +1290,7 @@ public class NameNode implements NameNodeStatusMXBean {
             i += 1;
           }
         } else {
-          LOG.fatal("Unknown upgrade flag " + flag);
+          LOG.error("Unknown upgrade flag " + flag);
           return null;
         }
       }
@@ -1252,7 +1298,7 @@ public class NameNode implements NameNodeStatusMXBean {
       startOpt = StartupOption.ROLLINGUPGRADE;
       ++i;
       if (i >= argsLen) {
-        LOG.fatal("Must specify a rolling upgrade startup option "
+        LOG.error("Must specify a rolling upgrade startup option "
            + RollingUpgradeStartupOption.getAllOptionString());
         return null;
       }
@@ -1274,7 +1320,7 @@ public class NameNode implements NameNodeStatusMXBean {
       } else if (StartupOption.FORCE.getName().equals(args[i])) {
         startOpt.setForceFormat(true);
       } else {
-        LOG.fatal("Invalid argument: " + args[i]);
+        LOG.error("Invalid argument: " + args[i]);
         return null;
       }
     }
@@ -1508,13 +1554,14 @@ public class NameNode implements NameNodeStatusMXBean {
     }

     try {
-      StringUtils.startupShutdownMessage(NameNode.class, argv, LOG);
+      StringUtils.startupShutdownMessage(NameNode.class, argv,
+          (org.apache.commons.logging.Log) LogManager.getLogger(LOG.getName()));
       NameNode namenode = createNameNode(argv, null);
       if (namenode != null) {
         namenode.join();
       }
     } catch (Throwable e) {
-      LOG.fatal("Failed to start namenode.", e);
+      LOG.error("Failed to start namenode.", e);
       terminate(1, e);
     }
   }
@@ -1641,7 +1688,7 @@ public class NameNode implements NameNodeStatusMXBean {
     String message = "Error encountered requiring NN shutdown. " +
         "Shutting down immediately.";
     try {
-      LOG.fatal(message, t);
+      LOG.error(message, t);
     } catch (Throwable ignored) {
       // This is unlikely to happen, but there's nothing we can do if it does.
     }

View File

@ -37,7 +37,6 @@ import java.util.Set;
import com.google.common.collect.Lists;
import org.apache.commons.logging.Log;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
@ -177,6 +176,7 @@ import org.apache.hadoop.tracing.TraceAdminProtocolPB;
import org.apache.hadoop.tracing.TraceAdminProtocolServerSideTranslatorPB;
import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.util.VersionUtil;
import org.slf4j.Logger;
import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.BlockingService;
@@ -187,9 +187,10 @@ import com.google.protobuf.BlockingService;
  */
 class NameNodeRpcServer implements NamenodeProtocols {

-  private static final Log LOG = NameNode.LOG;
-  private static final Log stateChangeLog = NameNode.stateChangeLog;
-  private static final Log blockStateChangeLog = NameNode.blockStateChangeLog;
+  private static final Logger LOG = NameNode.LOG;
+  private static final Logger stateChangeLog = NameNode.stateChangeLog;
+  private static final Logger blockStateChangeLog = NameNode
+      .blockStateChangeLog;

   // Dependencies from other parts of NN.
   protected final FSNamesystem namesystem;
@@ -1508,7 +1509,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
       IncorrectVersionException ive = new IncorrectVersionException(
           messagePrefix + " and CTime of DN ('" + dnCTime +
           "') does not match CTime of NN ('" + nnCTime + "')");
-      LOG.warn(ive);
+      LOG.warn(ive.toString(), ive);
       throw ive;
     } else {
       LOG.info(messagePrefix +

View File

@ -23,7 +23,6 @@ import static org.junit.Assert.fail;
import java.io.IOException;
import java.net.URI;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -48,7 +47,7 @@ import org.junit.Test;
 abstract public class TestSymlinkHdfs extends SymlinkBaseTest {

   {
-    ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
+    GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.ALL);
   }

   protected static MiniDFSCluster cluster;

View File

@ -63,6 +63,7 @@ import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.ha
.ConfiguredFailoverProxyProvider;
@@ -1659,8 +1660,11 @@ public class DFSTestUtil {
   }

   public static void setNameNodeLogLevel(Level level) {
-    GenericTestUtils.setLogLevel(LogFactory.getLog(FSNamesystem.class), level);
-    GenericTestUtils.setLogLevel(LogFactory.getLog(BlockManager.class), level);
+    GenericTestUtils.setLogLevel(FSNamesystem.LOG, level);
+    GenericTestUtils.setLogLevel(BlockManager.LOG, level);
+    GenericTestUtils.setLogLevel(LeaseManager.LOG, level);
     GenericTestUtils.setLogLevel(NameNode.LOG, level);
+    GenericTestUtils.setLogLevel(NameNode.stateChangeLog, level);
+    GenericTestUtils.setLogLevel(NameNode.blockStateChangeLog, level);
   }
 }
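
The test changes that follow lean on this helper: instead of casting each logger to Log4JLogger, a test class calls DFSTestUtil.setNameNodeLogLevel(...) once and uses GenericTestUtils.setLogLevel(...) for any additional component loggers. A hypothetical test class showing the shape (the class name is made up; the helper calls mirror the diffs below):

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;

public class TestExampleWithVerboseLogs {
  static {
    // Covers FSNamesystem, BlockManager, LeaseManager and the NameNode
    // state-change / block-state-change logs in one call (see above).
    DFSTestUtil.setNameNodeLogLevel(Level.ALL);
    // Other component loggers are still raised individually.
    GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
    GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
  }
}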

View File

@ -22,8 +22,6 @@ import static org.junit.Assert.assertTrue;
import java.io.IOException;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CommonConfigurationKeys;
@ -33,10 +31,8 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Test;
@ -45,13 +41,10 @@ import org.junit.Test;
*/
public class TestDatanodeDeath {
{
((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
((Log4JLogger)NameNode.blockStateChangeLog).getLogger().setLevel(Level.ALL);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)InterDatanodeProtocol.LOG).getLogger().setLevel(Level.ALL);
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
GenericTestUtils.setLogLevel(InterDatanodeProtocol.LOG, Level.ALL);
}
static final int blockSize = 8192;

View File

@ -28,7 +28,6 @@ import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
@ -39,12 +38,10 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Test;
@ -55,12 +52,9 @@ import org.junit.Test;
public class TestFileAppend2 {
{
((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
((Log4JLogger)NameNode.blockStateChangeLog).getLogger().setLevel(Level.ALL);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
}
static final int numBlocks = 5;

View File

@ -28,12 +28,12 @@ import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.test.GenericTestUtils;
import org.mockito.invocation.InvocationOnMock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
import org.mockito.stubbing.Answer;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
@ -47,9 +47,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.log4j.Level;
import org.junit.AfterClass;
@ -60,12 +57,10 @@ import org.junit.Test;
/** This class implements some of tests posted in HADOOP-2658. */
public class TestFileAppend3 {
{
((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)InterDatanodeProtocol.LOG).getLogger().setLevel(Level.ALL);
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
GenericTestUtils.setLogLevel(InterDatanodeProtocol.LOG, Level.ALL);
}
static final long BLOCK_SIZE = 64 * 1024;

View File

@ -45,10 +45,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
@ -72,11 +69,9 @@ public class TestFileAppend4 {
final boolean simulatedStorage = false;
{
((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
}
@Before

View File

@ -29,9 +29,6 @@ import java.io.FileOutputStream;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FileSystem;
@ -39,27 +36,27 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.log4j.Level;
import org.junit.Test;
import org.slf4j.Logger;
/**
* A JUnit test for corrupted file handling.
*/
public class TestFileCorruption {
{
((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
}
static Log LOG = ((Log4JLogger)NameNode.stateChangeLog);
static Logger LOG = NameNode.stateChangeLog;
/** check if DFS can handle corrupted blocks properly */
@Test

View File

@ -20,23 +20,16 @@ import static org.junit.Assert.assertTrue;
import java.io.IOException;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.log4j.Level;
import org.junit.Test;
public class TestFileCreationDelete {
{
((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
}
@Test

View File

@ -26,7 +26,6 @@ import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
@ -35,8 +34,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.Replica;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.After;
@ -159,9 +156,8 @@ public class TestPipelines {
}
private static void initLoggers() {
((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
}
}

View File

@ -22,25 +22,18 @@ import static org.mockito.Mockito.spy;
import java.io.IOException;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.log4j.Level;
import org.junit.Test;
import org.mockito.Mockito;
public class TestRenameWhileOpen {
{
((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
}
//TODO: un-comment checkFullFile once the lease recovery is done

View File

@ -26,7 +26,6 @@ import java.util.Random;
import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
@ -43,9 +42,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
@ -57,9 +53,7 @@ public class TestBalancerWithMultipleNameNodes {
static final Log LOG = Balancer.LOG;
{
((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.OFF);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
}

View File

@ -52,7 +52,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
@ -810,10 +809,9 @@ public abstract class BlockReportTestBase {
}
private static void initLoggers() {
((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) BlockReportTestBase.LOG).getLogger().setLevel(Level.ALL);
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
GenericTestUtils.setLogLevel(BlockReportTestBase.LOG, Level.ALL);
}
private Block findBlock(Path path, long size) throws IOException {

View File

@ -79,12 +79,13 @@ public class TestIncrementalBrVariations {
private DatanodeRegistration dn0Reg; // DataNodeRegistration for dn0
static {
((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
((Log4JLogger) BlockManager.blockLog).getLogger().setLevel(Level.ALL);
((Log4JLogger) NameNode.blockStateChangeLog).getLogger().setLevel(Level.ALL);
((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) TestIncrementalBrVariations.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.ALL);
GenericTestUtils.setLogLevel(BlockManager.blockLog, Level.ALL);
GenericTestUtils.setLogLevel(NameNode.blockStateChangeLog, Level.ALL);
GenericTestUtils
.setLogLevel(LogFactory.getLog(FSNamesystem.class), Level.ALL);
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
GenericTestUtils.setLogLevel(TestIncrementalBrVariations.LOG, Level.ALL);
}
@Before

View File

@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
@ -36,10 +35,10 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.tools.JMXGet;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Rule;
@ -67,9 +66,8 @@ import static org.junit.Assert.fail;
public abstract class LazyPersistTestCase {
static {
((Log4JLogger) NameNode.blockStateChangeLog).getLogger().setLevel(Level.ALL);
((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
((Log4JLogger) FsDatasetImpl.LOG).getLogger().setLevel(Level.ALL);
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
GenericTestUtils.setLogLevel(FsDatasetImpl.LOG, Level.ALL);
}
protected static final int BLOCK_SIZE = 5 * 1024 * 1024;

View File

@ -28,11 +28,11 @@ import java.util.List;
import com.google.common.base.Preconditions;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@ -60,6 +60,7 @@ import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.net.DNS;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Tool;
@ -144,12 +145,11 @@ public class NNThroughputBenchmark implements Tool {
static void setNameNodeLoggingLevel(Level logLevel) {
LOG.fatal("Log level = " + logLevel.toString());
// change log level to NameNode logs
LogManager.getLogger(NameNode.class.getName()).setLevel(logLevel);
((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(logLevel);
LogManager.getLogger(NetworkTopology.class.getName()).setLevel(logLevel);
LogManager.getLogger(FSNamesystem.class.getName()).setLevel(logLevel);
LogManager.getLogger(LeaseManager.class.getName()).setLevel(logLevel);
LogManager.getLogger(Groups.class.getName()).setLevel(logLevel);
DFSTestUtil.setNameNodeLogLevel(logLevel);
GenericTestUtils.setLogLevel(LogManager.getLogger(
NetworkTopology.class.getName()), logLevel);
GenericTestUtils.setLogLevel(LogManager.getLogger(
Groups.class.getName()), logLevel);
}
/**

View File

@ -23,7 +23,6 @@ import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@ -44,9 +43,7 @@ import org.junit.Test;
public class TestFsckWithMultipleNameNodes {
static final Log LOG = LogFactory.getLog(TestFsckWithMultipleNameNodes.class);
{
((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.OFF);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
}

View File

@ -28,7 +28,6 @@ import java.util.Collection;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@ -40,13 +39,11 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.TestFileCorruption;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
import org.slf4j.Logger;
/**
* This class tests the listCorruptFileBlocks API.
@ -57,7 +54,7 @@ import org.junit.Test;
* blocks/files are also returned.
*/
public class TestListCorruptFileBlocks {
static final Log LOG = NameNode.stateChangeLog;
static final Logger LOG = NameNode.stateChangeLog;
/** check if nn.getCorruptFiles() returns a file that has corrupted blocks */
@Test (timeout=300000)

View File

@ -84,9 +84,8 @@ public class TestHASafeMode {
private MiniDFSCluster cluster;
static {
((Log4JLogger)LogFactory.getLog(FSImage.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
GenericTestUtils.setLogLevel(FSImage.LOG, Level.ALL);
}
@Before

View File

@ -22,11 +22,11 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@ -34,8 +34,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
@ -52,9 +50,7 @@ import org.junit.Test;
public class TestWebHdfsDataLocality {
static final Log LOG = LogFactory.getLog(TestWebHdfsDataLocality.class);
{
((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.OFF);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
}
private static final String RACK0 = "/rack0";

View File

@ -21,7 +21,6 @@ import java.net.InetSocketAddress;
import java.net.URI;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
@ -29,14 +28,12 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.Assert;
@ -51,11 +48,9 @@ public class TestWebHdfsWithMultipleNameNodes {
static private void setLogLevel() {
((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(NamenodeWebHdfsMethods.LOG, Level.ALL);
((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.OFF);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
}
private static final Configuration conf = new HdfsConfiguration();