From db71de2e11cfa56a254ef4c92fea5ef4f8c19100 Mon Sep 17 00:00:00 2001
From: Suresh Srinivas
Date: Fri, 9 Nov 2012 18:07:04 +0000
Subject: [PATCH] HDFS-4080. Add a separate logger for block state change logs
 to enable turning off those logs. Contributed by Kihwal Lee.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1407566 13f79535-47bb-0310-9956-ffa450edef68
---
 .../src/main/conf/log4j.properties                 |  7 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt        |  2 +
 .../BlockInfoUnderConstruction.java                |  4 +-
 .../server/blockmanagement/BlockManager.java       | 87 ++++++++++---------
 .../blockmanagement/CorruptReplicasMap.java        |  4 +-
 .../blockmanagement/InvalidateBlocks.java          |  2 +-
 .../UnderReplicatedBlocks.java                     | 16 ++--
 .../hadoop/hdfs/server/namenode/NameNode.java      |  1 +
 .../server/namenode/NameNodeRpcServer.java         |  9 +-
 .../apache/hadoop/hdfs/TestDatanodeDeath.java      |  1 +
 .../apache/hadoop/hdfs/TestFileAppend2.java        |  1 +
 11 files changed, 74 insertions(+), 60 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index 4c49da0ad9f..b92ad27b67a 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -99,6 +99,13 @@ log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
 log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
 log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
 
+#
+# HDFS block state change log from block manager
+#
+# Uncomment the following to suppress normal block state change
+# messages from BlockManager in NameNode.
+#log4j.logger.BlockStateChange=WARN
+
 #
 #Security appender
 #
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 95a6b1c380f..78f92cbd5ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1961,6 +1961,8 @@ Release 0.23.5 - UNRELEASED
 
   NEW FEATURES
 
   IMPROVEMENTS
 
+    HDFS-4080. Add a separate logger for block state change logs to enable turning
+    off those logs. (Kihwal Lee via suresh)
 
   OPTIMIZATIONS
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
index 5c0db1bb9b4..36b3598b2c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
@@ -233,7 +233,7 @@ public void initializeBlockRecovery(long recoveryId) {
     setBlockUCState(BlockUCState.UNDER_RECOVERY);
     blockRecoveryId = recoveryId;
     if (replicas.size() == 0) {
-      NameNode.stateChangeLog.warn("BLOCK*"
+      NameNode.blockStateChangeLog.warn("BLOCK*"
         + " BlockInfoUnderConstruction.initLeaseRecovery:"
         + " No blocks found, lease removed.");
     }
@@ -245,7 +245,7 @@ public void initializeBlockRecovery(long recoveryId) {
         primaryNodeIndex = j;
         DatanodeDescriptor primary = replicas.get(j).getExpectedLocation();
         primary.addBlockToBeRecovered(this);
-        NameNode.stateChangeLog.info("BLOCK* " + this
+        NameNode.blockStateChangeLog.info("BLOCK* " + this
           + " recovery started, primary=" + primary);
         return;
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 3a971fbf0d4..74dbfbc295f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -83,6 +83,7 @@ public class BlockManager {
 
   static final Log LOG = LogFactory.getLog(BlockManager.class);
+  static final Log blockLog = NameNode.blockStateChangeLog;
 
   /** Default load factor of map */
   public static final float DEFAULT_MAP_LOAD_FACTOR = 0.75f;
@@ -872,7 +873,7 @@ private BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
       final long size) throws UnregisteredNodeException {
     final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
     if (node == null) {
-      NameNode.stateChangeLog.warn("BLOCK* getBlocks: "
+      blockLog.warn("BLOCK* getBlocks: "
           + "Asking for blocks from an unrecorded node " + datanode);
       throw new HadoopIllegalArgumentException(
           "Datanode " + datanode + " not found.");
@@ -950,7 +951,7 @@ private void addToInvalidates(Block b) {
       datanodes.append(node).append(" ");
     }
     if (datanodes.length() != 0) {
-      NameNode.stateChangeLog.info("BLOCK* addToInvalidates: " + b + " "
+      blockLog.info("BLOCK* addToInvalidates: " + b + " "
           + datanodes);
     }
   }
@@ -971,7 +972,7 @@ public void findAndMarkBlockAsCorrupt(final ExtendedBlock blk,
      // ignore the request for now. This could happen when BlockScanner
      // thread of Datanode reports bad block before Block reports are sent
      // by the Datanode on startup
-      NameNode.stateChangeLog.info("BLOCK* findAndMarkBlockAsCorrupt: "
+      blockLog.info("BLOCK* findAndMarkBlockAsCorrupt: "
           + blk + " not found");
       return;
     }
@@ -988,7 +989,7 @@ private void markBlockAsCorrupt(BlockToMarkCorrupt b,
     BlockCollection bc = b.corrupted.getBlockCollection();
     if (bc == null) {
-      NameNode.stateChangeLog.info("BLOCK markBlockAsCorrupt: " + b
+      blockLog.info("BLOCK markBlockAsCorrupt: " + b
           + " cannot be marked as corrupt as it does not belong to any file");
       addToInvalidates(b.corrupted, node);
       return;
     }
@@ -1013,7 +1014,7 @@ private void markBlockAsCorrupt(BlockToMarkCorrupt b,
    */
   private void invalidateBlock(BlockToMarkCorrupt b, DatanodeInfo dn
       ) throws IOException {
-    NameNode.stateChangeLog.info("BLOCK* invalidateBlock: " + b + " on " + dn);
+    blockLog.info("BLOCK* invalidateBlock: " + b + " on " + dn);
     DatanodeDescriptor node = getDatanodeManager().getDatanode(dn);
     if (node == null) {
       throw new IOException("Cannot invalidate " + b
@@ -1023,7 +1024,7 @@ private void invalidateBlock(BlockToMarkCorrupt b, DatanodeInfo dn
     // Check how many copies we have of the block
     NumberReplicas nr = countNodes(b.stored);
     if (nr.replicasOnStaleNodes() > 0) {
-      NameNode.stateChangeLog.info("BLOCK* invalidateBlocks: postponing " +
+      blockLog.info("BLOCK* invalidateBlocks: postponing " +
          "invalidation of " + b + " on " + dn + " because " +
          nr.replicasOnStaleNodes() + " replica(s) are located on nodes " +
          "with potentially out-of-date block reports");
@@ -1033,12 +1034,12 @@ private void invalidateBlock(BlockToMarkCorrupt b, DatanodeInfo dn
       // If we have at least one copy on a live node, then we can delete it.
       addToInvalidates(b.corrupted, dn);
       removeStoredBlock(b.stored, node);
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug("BLOCK* invalidateBlocks: "
+      if(blockLog.isDebugEnabled()) {
+        blockLog.debug("BLOCK* invalidateBlocks: "
             + b + " on " + dn + " listed for deletion.");
       }
     } else {
-      NameNode.stateChangeLog.info("BLOCK* invalidateBlocks: " + b
+      blockLog.info("BLOCK* invalidateBlocks: " + b
           + " on " + dn + " is the only copy and was not deleted");
     }
   }
@@ -1160,7 +1161,7 @@ int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
                (blockHasEnoughRacks(block)) ) {
             neededReplications.remove(block, priority); // remove from neededReplications
             neededReplications.decrementReplicationIndex(priority);
-            NameNode.stateChangeLog.info("BLOCK* Removing " + block
+            blockLog.info("BLOCK* Removing " + block
                 + " from neededReplications as it has enough replicas");
             continue;
           }
@@ -1235,7 +1236,7 @@ int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
           neededReplications.remove(block, priority); // remove from neededReplications
           neededReplications.decrementReplicationIndex(priority);
           rw.targets = null;
-          NameNode.stateChangeLog.info("BLOCK* Removing " + block
+          blockLog.info("BLOCK* Removing " + block
               + " from neededReplications as it has enough replicas");
           continue;
         }
@@ -1261,8 +1262,8 @@ int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
         // The reason we use 'pending' is so we can retry
         // replications that fail after an appropriate amount of time.
         pendingReplications.increment(block, targets.length);
-        if(NameNode.stateChangeLog.isDebugEnabled()) {
-          NameNode.stateChangeLog.debug(
+        if(blockLog.isDebugEnabled()) {
+          blockLog.debug(
               "BLOCK* block " + block
               + " is moved from neededReplications to pendingReplications");
         }
@@ -1278,7 +1279,7 @@ int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
       namesystem.writeUnlock();
     }
 
-    if (NameNode.stateChangeLog.isInfoEnabled()) {
+    if (blockLog.isInfoEnabled()) {
       // log which blocks have been scheduled for replication
       for(ReplicationWork rw : work){
         DatanodeDescriptor[] targets = rw.targets;
@@ -1288,13 +1289,13 @@ int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
             targetList.append(' ');
            targetList.append(targets[k]);
          }
-          NameNode.stateChangeLog.info("BLOCK* ask " + rw.srcNode
+          blockLog.info("BLOCK* ask " + rw.srcNode
              + " to replicate " + rw.block + " to " + targetList);
        }
      }
    }
-    if(NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug(
+    if(blockLog.isDebugEnabled()) {
+      blockLog.debug(
          "BLOCK* neededReplications = " + neededReplications.size()
          + " pendingReplications = " + pendingReplications.size());
    }
@@ -1504,7 +1505,7 @@ public void processReport(final DatanodeID nodeID, final String poolId,
       // To minimize startup time, we discard any second (or later) block reports
       // that we receive while still in startup phase.
       if (namesystem.isInStartupSafeMode() && !node.isFirstBlockReport()) {
-        NameNode.stateChangeLog.info("BLOCK* processReport: "
+        blockLog.info("BLOCK* processReport: "
             + "discarded non-initial block report from " + nodeID
             + " because namenode still in startup phase");
         return;
@@ -1536,7 +1537,7 @@ public void processReport(final DatanodeID nodeID, final String poolId,
 
     // Log the block report processing stats from Namenode perspective
     NameNode.getNameNodeMetrics().addBlockReport((int) (endTime - startTime));
-    NameNode.stateChangeLog.info("BLOCK* processReport: from "
+    blockLog.info("BLOCK* processReport: from "
         + nodeID + ", blocks: " + newReport.getNumberOfBlocks()
         + ", processing time: " + (endTime - startTime) + " msecs");
   }
@@ -1596,7 +1597,7 @@ private void processReport(final DatanodeDescriptor node,
       addStoredBlock(b, node, null, true);
     }
     for (Block b : toInvalidate) {
-      NameNode.stateChangeLog.info("BLOCK* processReport: "
+      blockLog.info("BLOCK* processReport: "
           + b + " on " + node + " size " + b.getNumBytes()
           + " does not belong to any file");
       addToInvalidates(b, node);
@@ -2034,7 +2035,7 @@ private Block addStoredBlock(final BlockInfo block,
     }
     if (storedBlock == null || storedBlock.getBlockCollection() == null) {
       // If this block does not belong to anyfile, then we are done.
-      NameNode.stateChangeLog.info("BLOCK* addStoredBlock: " + block + " on "
+      blockLog.info("BLOCK* addStoredBlock: " + block + " on "
           + node + " size " + block.getNumBytes()
          + " but it does not belong to any file");
      // we could add this block to invalidate set of this datanode.
@@ -2056,7 +2057,7 @@ private Block addStoredBlock(final BlockInfo block,
       }
     } else {
       curReplicaDelta = 0;
-      NameNode.stateChangeLog.warn("BLOCK* addStoredBlock: "
+      blockLog.warn("BLOCK* addStoredBlock: "
           + "Redundant addStoredBlock request received for " + storedBlock
           + " on " + node + " size " + storedBlock.getNumBytes());
     }
@@ -2115,7 +2116,7 @@ private Block addStoredBlock(final BlockInfo block,
   }
 
   private void logAddStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) {
-    if (!NameNode.stateChangeLog.isInfoEnabled()) {
+    if (!blockLog.isInfoEnabled()) {
       return;
     }
 
@@ -2126,7 +2127,7 @@ private void logAddStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) {
     storedBlock.appendStringTo(sb);
     sb.append(" size " )
       .append(storedBlock.getNumBytes());
-    NameNode.stateChangeLog.info(sb);
+    blockLog.info(sb);
   }
 
   /**
    * Invalidate corrupt replicas.
@@ -2153,7 +2154,7 @@ private void invalidateCorruptReplicas(BlockInfo blk) {
       try {
         invalidateBlock(new BlockToMarkCorrupt(blk, null), node);
       } catch (IOException e) {
-        NameNode.stateChangeLog.info("invalidateCorruptReplicas "
+        blockLog.info("invalidateCorruptReplicas "
             + "error in deleting bad block " + blk + " on " + node, e);
         gotException = true;
       }
@@ -2391,7 +2392,7 @@ private void chooseExcessReplicates(Collection<DatanodeDescriptor> nonExcess,
       // upon giving instructions to the namenode.
       //
       addToInvalidates(b, cur);
-      NameNode.stateChangeLog.info("BLOCK* chooseExcessReplicates: "
+      blockLog.info("BLOCK* chooseExcessReplicates: "
          +"("+cur+", "+b+") is added to invalidated blocks set");
     }
   }
@@ -2405,8 +2406,8 @@ private void addToExcessReplicate(DatanodeInfo dn, Block block) {
     }
     if (excessBlocks.add(block)) {
       excessBlocksCount++;
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug("BLOCK* addToExcessReplicate:"
+      if(blockLog.isDebugEnabled()) {
+        blockLog.debug("BLOCK* addToExcessReplicate:"
            + " (" + dn + ", " + block
            + ") is added to excessReplicateMap");
      }
@@ -2418,15 +2419,15 @@ private void addToExcessReplicate(DatanodeInfo dn, Block block) {
    * removed block is still valid.
    */
   public void removeStoredBlock(Block block, DatanodeDescriptor node) {
-    if(NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("BLOCK* removeStoredBlock: "
+    if(blockLog.isDebugEnabled()) {
+      blockLog.debug("BLOCK* removeStoredBlock: "
          + block + " from " + node);
     }
     assert (namesystem.hasWriteLock());
     {
       if (!blocksMap.removeNode(block, node)) {
-        if(NameNode.stateChangeLog.isDebugEnabled()) {
-          NameNode.stateChangeLog.debug("BLOCK* removeStoredBlock: "
+        if(blockLog.isDebugEnabled()) {
+          blockLog.debug("BLOCK* removeStoredBlock: "
             + block + " has already been removed from node " + node);
        }
        return;
@@ -2453,8 +2454,8 @@ public void removeStoredBlock(Block block, DatanodeDescriptor node) {
       if (excessBlocks != null) {
         if (excessBlocks.remove(block)) {
           excessBlocksCount--;
-          if(NameNode.stateChangeLog.isDebugEnabled()) {
-            NameNode.stateChangeLog.debug("BLOCK* removeStoredBlock: "
+          if(blockLog.isDebugEnabled()) {
+            blockLog.debug("BLOCK* removeStoredBlock: "
               + block + " is removed from excessBlocks");
          }
          if (excessBlocks.size() == 0) {
@@ -2497,7 +2498,7 @@ void addBlock(DatanodeDescriptor node, Block block, String delHint)
     if (delHint != null && delHint.length() != 0) {
       delHintNode = datanodeManager.getDatanode(delHint);
       if (delHintNode == null) {
-        NameNode.stateChangeLog.warn("BLOCK* blockReceived: " + block
+        blockLog.warn("BLOCK* blockReceived: " + block
            + " is expected to be removed from an unrecorded node " + delHint);
      }
    }
@@ -2532,7 +2533,7 @@ private void processAndHandleReportedBlock(DatanodeDescriptor node, Block block,
       addStoredBlock(b, node, delHintNode, true);
     }
     for (Block b : toInvalidate) {
-      NameNode.stateChangeLog.info("BLOCK* addBlock: block "
+      blockLog.info("BLOCK* addBlock: block "
          + b + " on " + node + " size " + b.getNumBytes()
          + " does not belong to any file");
      addToInvalidates(b, node);
@@ -2558,7 +2559,7 @@ public void processIncrementalBlockReport(final DatanodeID nodeID,
     try {
       final DatanodeDescriptor node = datanodeManager.getDatanode(nodeID);
       if (node == null || !node.isAlive) {
-        NameNode.stateChangeLog
+        blockLog
            .warn("BLOCK* processIncrementalBlockReport"
                + " is received from dead or unregistered node "
                + nodeID);
@@ -2585,19 +2586,19 @@ public void processIncrementalBlockReport(final DatanodeID nodeID,
           String msg = 
             "Unknown block status code reported by " + nodeID +
             ": " + rdbi;
-          NameNode.stateChangeLog.warn(msg);
+          blockLog.warn(msg);
          assert false : msg; // if assertions are enabled, throw.
          break;
        }
-        if (NameNode.stateChangeLog.isDebugEnabled()) {
-          NameNode.stateChangeLog.debug("BLOCK* block "
+        if (blockLog.isDebugEnabled()) {
+          blockLog.debug("BLOCK* block "
              + (rdbi.getStatus()) + ": " + rdbi.getBlock()
              + " is received from " + nodeID);
        }
      }
    } finally {
      namesystem.writeUnlock();
-      NameNode.stateChangeLog
+      blockLog
          .debug("*BLOCK* NameNode.processIncrementalBlockReport: "
              + "from " + nodeID
              + " receiving: " + receiving + ", "
@@ -2890,8 +2891,8 @@ private int invalidateWorkForOneNode(String nodeId) {
     } finally {
       namesystem.writeUnlock();
     }
-    if (NameNode.stateChangeLog.isInfoEnabled()) {
-      NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName()
+    if (blockLog.isInfoEnabled()) {
+      blockLog.info("BLOCK* " + getClass().getSimpleName()
          + ": ask " + dn + " to delete " + toInvalidate);
     }
     return toInvalidate.size();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
index 440e3d40569..4613199ee6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
@@ -63,13 +63,13 @@ public void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn,
     
     if (!nodes.contains(dn)) {
       nodes.add(dn);
-      NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
+      NameNode.blockStateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
                                    blk.getBlockName() +
                                    " added as corrupt on " + dn +
                                    " by " + Server.getRemoteIp() +
                                    reasonText);
     } else {
-      NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
+      NameNode.blockStateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
                                    "duplicate requested for " + 
                                    blk.getBlockName() + " to add as corrupt " +
                                    "on " + dn +
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
index 2a10ee2253c..841ca41755f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
@@ -86,7 +86,7 @@ synchronized void add(final Block block, final DatanodeInfo datanode,
     if (set.add(block)) {
       numBlocks++;
       if (log) {
-        NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName()
+        NameNode.blockStateChangeLog.info("BLOCK* " + getClass().getSimpleName()
            + ": add " + block + " to " + datanode);
      }
    }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java
index 0759d533633..779b445cece 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java
@@ -190,8 +190,8 @@ synchronized boolean add(Block block,
     int priLevel = getPriority(block, curReplicas, decomissionedReplicas,
                                expectedReplicas);
     if(priorityQueues.get(priLevel).add(block)) {
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug(
+      if(NameNode.blockStateChangeLog.isDebugEnabled()) {
+        NameNode.blockStateChangeLog.debug(
          "BLOCK* NameSystem.UnderReplicationBlock.add:"
          + block
          + " has only " + curReplicas
@@ -233,8 +233,8 @@ synchronized boolean remove(Block block,
   boolean remove(Block block, int priLevel) {
     if(priLevel >= 0 && priLevel < LEVEL
         && priorityQueues.get(priLevel).remove(block)) {
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug(
+      if(NameNode.blockStateChangeLog.isDebugEnabled()) {
+        NameNode.blockStateChangeLog.debug(
          "BLOCK* NameSystem.UnderReplicationBlock.remove: "
          + "Removing block " + block
          + " from priority queue "+ priLevel);
@@ -245,8 +245,8 @@ boolean remove(Block block, int priLevel) {
       // not found in the queue for the given priority level.
       for (int i = 0; i < LEVEL; i++) {
         if (priorityQueues.get(i).remove(block)) {
-          if(NameNode.stateChangeLog.isDebugEnabled()) {
-            NameNode.stateChangeLog.debug(
+          if(NameNode.blockStateChangeLog.isDebugEnabled()) {
+            NameNode.blockStateChangeLog.debug(
              "BLOCK* NameSystem.UnderReplicationBlock.remove: "
              + "Removing block " + block
              + " from priority queue "+ i);
@@ -296,8 +296,8 @@ synchronized void update(Block block, int curReplicas,
       remove(block, oldPri);
     }
     if(priorityQueues.get(curPri).add(block)) {
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug(
+      if(NameNode.blockStateChangeLog.isDebugEnabled()) {
+        NameNode.blockStateChangeLog.debug(
          "BLOCK* NameSystem.UnderReplicationBlock.update:"
          + block
          + " has only "+ curReplicas
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index fb5c88d8bbc..309811c242f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -227,6 +227,7 @@ public long getProtocolVersion(String protocol,
   public static final int DEFAULT_PORT = 8020;
   public static final Log LOG = LogFactory.getLog(NameNode.class.getName());
   public static final Log stateChangeLog = LogFactory.getLog("org.apache.hadoop.hdfs.StateChange");
+  public static final Log blockStateChangeLog = LogFactory.getLog("BlockStateChange");
   public static final HAState ACTIVE_STATE = new ActiveState();
   public static final HAState STANDBY_STATE = new StandbyState();
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 6d505836f9f..de8a909e37f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -132,6 +132,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
   
   private static final Log LOG = NameNode.LOG;
   private static final Log stateChangeLog = NameNode.stateChangeLog;
+  private static final Log blockStateChangeLog = NameNode.blockStateChangeLog;
   
   // Dependencies from other parts of NN.
   protected final FSNamesystem namesystem;
@@ -889,8 +890,8 @@ public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
       String poolId, StorageBlockReport[] reports) throws IOException {
     verifyRequest(nodeReg);
     BlockListAsLongs blist = new BlockListAsLongs(reports[0].getBlocks());
-    if(stateChangeLog.isDebugEnabled()) {
-      stateChangeLog.debug("*BLOCK* NameNode.blockReport: "
+    if(blockStateChangeLog.isDebugEnabled()) {
+      blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
          + "from " + nodeReg + " " + blist.getNumberOfBlocks()
          + " blocks");
    }
@@ -905,8 +906,8 @@ public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
   public void blockReceivedAndDeleted(DatanodeRegistration nodeReg, String poolId,
       StorageReceivedDeletedBlocks[] receivedAndDeletedBlocks) throws IOException {
     verifyRequest(nodeReg);
-    if(stateChangeLog.isDebugEnabled()) {
-      stateChangeLog.debug("*BLOCK* NameNode.blockReceivedAndDeleted: "
+    if(blockStateChangeLog.isDebugEnabled()) {
+      blockStateChangeLog.debug("*BLOCK* NameNode.blockReceivedAndDeleted: "
          +"from "+nodeReg+" "+receivedAndDeletedBlocks.length
          +" blocks.");
    }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
index a6625431fdc..c0b3994aed9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
@@ -46,6 +46,7 @@ public class TestDatanodeDeath {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)NameNode.blockStateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
index f488040c492..d33052e4c6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
@@ -51,6 +51,7 @@ public class TestFileAppend2 {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)NameNode.blockStateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
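
Usage note (illustrative, not part of the patch): after this change, block state messages go to the new `BlockStateChange` logger instead of `org.apache.hadoop.hdfs.StateChange`, so they can be tuned independently, either via the commented-out `log4j.logger.BlockStateChange=WARN` line added to log4j.properties above, or programmatically with the same `Log4JLogger` cast the patch adds to the tests. A minimal sketch, assuming log4j is the active commons-logging backend (the helper class and method names here are hypothetical):

```java
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.log4j.Level;

// Hypothetical helper: quiet the routine INFO-level block state change
// messages without touching the general StateChange log.
public class BlockStateChangeLogTuner {
  public static void suppressBlockStateLogs() {
    // Same cast-and-setLevel pattern the patch uses in TestDatanodeDeath
    // and TestFileAppend2, but raising the threshold instead of lowering it.
    ((Log4JLogger) NameNode.blockStateChangeLog).getLogger().setLevel(Level.WARN);
  }
}
```

Because `blockLog` in BlockManager and `blockStateChangeLog` in NameNodeRpcServer are both aliases for `NameNode.blockStateChangeLog`, a single level change covers every call site touched by this patch.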