HDFS-4657. Limit the number of blocks logged by the NN after a block report to a configurable value. (Aaron T. Myers via Colin Patrick McCabe)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1503862 13f79535-47bb-0310-9956-ffa450edef68
parent a3a9d72e98
commit badab7ed5e
@@ -250,6 +250,9 @@ Release 2.2.0 - UNRELEASED
   NEW FEATURES
 
   IMPROVEMENTS
 
+    HDFS-4657. Limit the number of blocks logged by the NN after a block
+    report to a configurable value. (Aaron T. Myers via Colin Patrick
+    McCabe)
 
   OPTIMIZATIONS
@@ -483,4 +483,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_QJOURNAL_GET_JOURNAL_STATE_TIMEOUT_DEFAULT = 120000;
   public static final int DFS_QJOURNAL_NEW_EPOCH_TIMEOUT_DEFAULT = 120000;
   public static final int DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_DEFAULT = 20000;
+
+  public static final String DFS_MAX_NUM_BLOCKS_TO_LOG_KEY = "dfs.namenode.max-num-blocks-to-log";
+  public static final long DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT = 1000l;
 }
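For context only (not part of the commit): dfs.namenode.max-num-blocks-to-log is an ordinary HDFS setting, so it can be overridden in hdfs-site.xml or set programmatically before the NameNode starts. A minimal sketch, assuming the standard Configuration/HdfsConfiguration API; the example class name and the chosen value of 100 are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class MaxBlocksToLogExample {
      public static void main(String[] args) {
        // Illustrative: lower the per-report logging cap from the 1000-block default.
        Configuration conf = new HdfsConfiguration();
        conf.setLong(DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY, 100L);

        // The BlockManager reads the value the same way, falling back to the default.
        long cap = conf.getLong(DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
            DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT);
        System.out.println("dfs.namenode.max-num-blocks-to-log = " + cap);
      }
    }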
@@ -217,6 +217,9 @@ public class BlockManager {
   // whether or not to issue block encryption keys.
   final boolean encryptDataTransfer;
 
+  // Max number of blocks to log info about during a block report.
+  private final long maxNumBlocksToLog;
+
   /**
    * When running inside a Standby node, the node may receive block reports
    * from datanodes before receiving the corresponding namespace edits from
@@ -297,6 +300,10 @@ public class BlockManager {
         conf.getBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY,
             DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEFAULT);
 
+    this.maxNumBlocksToLog =
+        conf.getLong(DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
+            DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT);
+
     LOG.info("defaultReplication = " + defaultReplication);
     LOG.info("maxReplication = " + maxReplication);
     LOG.info("minReplication = " + minReplication);
@@ -304,6 +311,7 @@ public class BlockManager {
     LOG.info("shouldCheckForEnoughRacks = " + shouldCheckForEnoughRacks);
     LOG.info("replicationRecheckInterval = " + replicationRecheckInterval);
     LOG.info("encryptDataTransfer = " + encryptDataTransfer);
+    LOG.info("maxNumBlocksToLog = " + maxNumBlocksToLog);
   }
 
   private static BlockTokenSecretManager createBlockTokenSecretManager(
@@ -1698,8 +1706,14 @@ public class BlockManager {
     for (Block b : toRemove) {
       removeStoredBlock(b, node);
     }
+    int numBlocksLogged = 0;
     for (BlockInfo b : toAdd) {
-      addStoredBlock(b, node, null, true);
+      addStoredBlock(b, node, null, numBlocksLogged < maxNumBlocksToLog);
+      numBlocksLogged++;
+    }
+    if (numBlocksLogged > maxNumBlocksToLog) {
+      blockLog.info("BLOCK* processReport: logged info for " + maxNumBlocksToLog
+          + " of " + numBlocksLogged + " reported.");
     }
     for (Block b : toInvalidate) {
       blockLog.info("BLOCK* processReport: "
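To make the capping logic in the hunk above easier to follow in isolation, here is a self-contained sketch (illustrative names and plain stdout logging; the real code passes the flag into addStoredBlock and writes to blockLog): per-block detail is emitted only while the counter is below the cap, and a single summary line is added once the report exceeds it.

    import java.util.Arrays;
    import java.util.List;

    public class CappedBlockLoggingSketch {
      // Mirrors dfs.namenode.max-num-blocks-to-log (default 1000); kept small for demonstration.
      private static final long MAX_NUM_BLOCKS_TO_LOG = 3L;

      static void processReport(List<Long> toAdd) {
        int numBlocksLogged = 0;
        for (long blockId : toAdd) {
          // Only the first MAX_NUM_BLOCKS_TO_LOG additions get a detailed line.
          if (numBlocksLogged < MAX_NUM_BLOCKS_TO_LOG) {
            System.out.println("BLOCK* processReport: added block " + blockId);
          }
          numBlocksLogged++;
        }
        // Summary only when the cap was actually exceeded; smaller reports log as before.
        if (numBlocksLogged > MAX_NUM_BLOCKS_TO_LOG) {
          System.out.println("BLOCK* processReport: logged info for " + MAX_NUM_BLOCKS_TO_LOG
              + " of " + numBlocksLogged + " reported.");
        }
      }

      public static void main(String[] args) {
        processReport(Arrays.asList(1L, 2L, 3L, 4L, 5L));
      }
    }

The hunk below applies the same pattern at a second call site, with an "addBlock" log prefix instead of "processReport".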
@@ -2637,8 +2651,14 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     for (StatefulBlockInfo b : toUC) {
       addStoredBlockUnderConstruction(b.storedBlock, node, b.reportedState);
     }
+    long numBlocksLogged = 0;
     for (BlockInfo b : toAdd) {
-      addStoredBlock(b, node, delHintNode, true);
+      addStoredBlock(b, node, delHintNode, numBlocksLogged < maxNumBlocksToLog);
+      numBlocksLogged++;
+    }
+    if (numBlocksLogged > maxNumBlocksToLog) {
+      blockLog.info("BLOCK* addBlock: logged info for " + maxNumBlocksToLog
+          + " of " + numBlocksLogged + " reported.");
+    }
     for (Block b : toInvalidate) {
       blockLog.info("BLOCK* addBlock: block "