HDFS-9941. Do not log StandbyException on NN, other minor logging fixes. Contributed by Arpit Agarwal.
(cherry picked from commit 5644137ada)
Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
parent 3d5ac829da
commit dd462bc32f
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java

@@ -33,11 +33,13 @@ import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
  */
 public class BlockUnderConstructionFeature {
   private BlockUCState blockUCState;
+  private static final ReplicaUnderConstruction[] NO_REPLICAS =
+      new ReplicaUnderConstruction[0];
 
   /**
    * Block replicas as assigned when the block was allocated.
    */
-  private ReplicaUnderConstruction[] replicas;
+  private ReplicaUnderConstruction[] replicas = NO_REPLICAS;
 
   /**
    * Index of the primary data node doing the recovery. Useful for log
@@ -90,7 +92,7 @@ public class BlockUnderConstructionFeature {
 
   /** Get the number of expected locations */
   public int getNumExpectedLocations() {
-    return replicas == null ? 0 : replicas.length;
+    return replicas.length;
   }
 
   /**
@@ -127,14 +129,12 @@ public class BlockUnderConstructionFeature {
 
   List<ReplicaUnderConstruction> getStaleReplicas(long genStamp) {
     List<ReplicaUnderConstruction> staleReplicas = new ArrayList<>();
-    if (replicas != null) {
-      // Remove replicas with wrong gen stamp. The replica list is unchanged.
-      for (ReplicaUnderConstruction r : replicas) {
-        if (genStamp != r.getGenerationStamp()) {
-          staleReplicas.add(r);
-        }
+    // Remove replicas with wrong gen stamp. The replica list is unchanged.
+    for (ReplicaUnderConstruction r : replicas) {
+      if (genStamp != r.getGenerationStamp()) {
+        staleReplicas.add(r);
       }
     }
     return staleReplicas;
   }
@@ -146,7 +146,7 @@ public class BlockUnderConstructionFeature {
   public void initializeBlockRecovery(BlockInfo blockInfo, long recoveryId) {
     setBlockUCState(BlockUCState.UNDER_RECOVERY);
     blockRecoveryId = recoveryId;
-    if (replicas == null || replicas.length == 0) {
+    if (replicas.length == 0) {
       NameNode.blockStateChangeLog.warn("BLOCK*" +
           " BlockUnderConstructionFeature.initializeBlockRecovery:" +
           " No blocks found, lease removed.");
@@ -197,7 +197,7 @@ public class BlockUnderConstructionFeature {
   /** Add the reported replica if it is not already in the replica list. */
   void addReplicaIfNotPresent(DatanodeStorageInfo storage,
       Block reportedBlock, ReplicaState rState) {
-    if (replicas == null) {
+    if (replicas.length == 0) {
       replicas = new ReplicaUnderConstruction[1];
       replicas[0] = new ReplicaUnderConstruction(reportedBlock, storage,
           rState);
@@ -240,7 +240,6 @@ public class BlockUnderConstructionFeature {
       .append(", truncateBlock=").append(truncateBlock)
       .append(", primaryNodeIndex=").append(primaryNodeIndex)
       .append(", replicas=[");
-    if (replicas != null) {
     int i = 0;
     for (ReplicaUnderConstruction r : replicas) {
       r.appendStringTo(sb);
@@ -248,7 +247,17 @@ public class BlockUnderConstructionFeature {
         sb.append(", ");
       }
     }
-    }
     sb.append("]}");
   }
+
+  public void appendUCPartsConcise(StringBuilder sb) {
+    sb.append("replicas=");
+    int i = 0;
+    for (ReplicaUnderConstruction r : replicas) {
+      sb.append(r.getExpectedStorageLocation().getDatanodeDescriptor());
+      if (++i < replicas.length) {
+        sb.append(", ");
+      }
+    }
+  }
 }
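The thread running through this file is the replacement of a nullable replicas array with a shared zero-length sentinel, which is what lets every `replicas == null` check above be deleted. A minimal, self-contained sketch of the idiom follows; the class and field names here are illustrative stand-ins, not HDFS code:

public class ReplicaTracker {
  // Shared empty-array sentinel: one immutable instance serves all objects,
  // so the field below is never null and callers never need null checks.
  private static final String[] NO_REPLICAS = new String[0];

  // Starts as the sentinel and is swapped for a real array on first report.
  private String[] replicas = NO_REPLICAS;

  public int getNumExpectedLocations() {
    return replicas.length; // safe without a null check
  }

  public void setReplicas(String[] newReplicas) {
    // Normalize null back to the sentinel so the invariant holds.
    replicas = (newReplicas == null) ? NO_REPLICAS : newReplicas;
  }

  public static void main(String[] args) {
    ReplicaTracker t = new ReplicaTracker();
    System.out.println(t.getNumExpectedLocations()); // prints 0, no NPE
  }
}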
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.util.CyclicIteration;
 import org.apache.hadoop.util.ChunkedArrayList;
@@ -289,6 +290,10 @@ public class DecommissionManager {
       BlockCollection bc,
       DatanodeDescriptor srcNode, NumberReplicas num,
       Iterable<DatanodeStorageInfo> storages) {
+    if (!NameNode.blockStateChangeLog.isInfoEnabled()) {
+      return;
+    }
+
     int curReplicas = num.liveReplicas();
     int curExpectedReplicas = block.getReplication();
     StringBuilder nodeList = new StringBuilder();
@@ -297,7 +302,8 @@ public class DecommissionManager {
       nodeList.append(node);
       nodeList.append(" ");
     }
-    LOG.info("Block: " + block + ", Expected Replicas: "
+    NameNode.blockStateChangeLog.info(
+        "Block: " + block + ", Expected Replicas: "
         + curExpectedReplicas + ", live replicas: " + curReplicas
         + ", corrupt replicas: " + num.corruptReplicas()
         + ", decommissioned replicas: " + num.decommissioned()
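The early return added here means the node list and the long message string are never built when INFO is disabled on blockStateChangeLog. A small sketch of the guard pattern, assuming an SLF4J logger on the classpath (logger and message contents are illustrative):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class GuardedLogging {
  private static final Logger LOG =
      LoggerFactory.getLogger(GuardedLogging.class);

  static void logReplicaStatus(String block, int live, int expected) {
    // Bail out before any string building, mirroring the isInfoEnabled()
    // guard in the patch: the concatenation cost is paid only when the
    // message will actually be emitted.
    if (!LOG.isInfoEnabled()) {
      return;
    }
    StringBuilder sb = new StringBuilder();
    sb.append("Block: ").append(block)
      .append(", Expected Replicas: ").append(expected)
      .append(", live replicas: ").append(live);
    LOG.info(sb.toString());
  }
}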
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java

@@ -811,10 +811,26 @@ class FSDirWriteFileOp {
     assert fsn.hasWriteLock();
     BlockInfo b = addBlock(fsn.dir, src, inodesInPath, newBlock,
                            targets);
-    NameNode.stateChangeLog.info("BLOCK* allocate " + b + " for " + src);
+    logAllocatedBlock(src, b);
     DatanodeStorageInfo.incrementBlocksScheduled(targets);
   }
 
+  private static void logAllocatedBlock(String src, BlockInfo b) {
+    if (!NameNode.stateChangeLog.isInfoEnabled()) {
+      return;
+    }
+    StringBuilder sb = new StringBuilder(150);
+    sb.append("BLOCK* allocate ");
+    b.appendStringTo(sb);
+    sb.append(", ");
+    BlockUnderConstructionFeature uc = b.getUnderConstructionFeature();
+    if (uc != null) {
+      uc.appendUCPartsConcise(sb);
+    }
+    sb.append(" for " + src);
+    NameNode.stateChangeLog.info(sb.toString());
+  }
+
   private static void setNewINodeStoragePolicy(BlockManager bm, INodeFile
       inode, INodesInPath iip, boolean isLazyPersist)
       throws IOException {
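logAllocatedBlock composes its message through appendStringTo-style methods rather than toString(), so no intermediate Strings are allocated, and it does so only behind the INFO guard. A sketch of that composition style, using an invented stand-in for BlockInfo:

public class AppendComposition {
  // Illustrative stand-in for BlockInfo: it writes itself into a
  // caller-owned StringBuilder instead of allocating an intermediate String.
  static class Block {
    final long id;
    Block(long id) { this.id = id; }
    void appendStringTo(StringBuilder sb) {
      sb.append("blk_").append(id);
    }
  }

  static String describeAllocation(String src, Block b) {
    StringBuilder sb = new StringBuilder(150); // presized, as in the patch
    sb.append("BLOCK* allocate ");
    b.appendStringTo(sb);
    sb.append(" for ").append(src);
    return sb.toString();
  }

  public static void main(String[] args) {
    System.out.println(describeAllocation("/tmp/f1", new Block(1073741825L)));
  }
}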
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -162,6 +162,7 @@ import org.apache.hadoop.ipc.RetryCache;
 import org.apache.hadoop.ipc.RetryCache.CacheEntry;
 import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload;
 import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.ipc.WritableRpcEngine;
 import org.apache.hadoop.ipc.RefreshRegistry;
 import org.apache.hadoop.ipc.RefreshResponse;
@@ -489,6 +490,9 @@ class NameNodeRpcServer implements NamenodeProtocols {
         FSLimitException.PathComponentTooLongException.class,
         FSLimitException.MaxDirectoryItemsExceededException.class,
         UnresolvedPathException.class);
+
+    clientRpcServer.addSuppressedLoggingExceptions(StandbyException.class);
+
     clientRpcServer.setTracer(nn.tracer);
     if (serviceRpcServer != null) {
       serviceRpcServer.setTracer(nn.tracer);
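This is the change the JIRA title refers to: StandbyException is raised routinely when clients probe the standby NameNode during HA failover, so it is operational noise rather than an error, and the RPC server is told to stop logging it. The sketch below illustrates the general concept of a suppression list in a server-side exception logger; it is not Hadoop's ipc.Server implementation, and the names are illustrative:

import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;

public class SuppressedExceptionLogging {
  // Exception types that are expected in normal operation and should not
  // be logged (e.g. a StandbyException-like failover signal).
  private final Set<Class<? extends Exception>> suppressed =
      new CopyOnWriteArraySet<>();

  @SafeVarargs
  public final void addSuppressedLoggingExceptions(
      Class<? extends Exception>... classes) {
    for (Class<? extends Exception> c : classes) {
      suppressed.add(c);
    }
  }

  public void logException(Exception e) {
    // Exact class match, kept simple for the sketch; a fuller version
    // might also walk the class hierarchy.
    if (suppressed.contains(e.getClass())) {
      return; // expected exception: skip logging entirely
    }
    System.err.println("RPC handler caught: " + e);
  }
}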