Allow block reconstruction pending timeout to be refreshable (#4567)

Reviewed-by: Hiroyuki Adachi <hadachi@yahoo-corp.jp>
Signed-off-by: Takanobu Asanuma <tasanuma@apache.org>
caozhiqiang 2022-09-12 10:45:01 +08:00 committed by GitHub
parent 21bae31d58
commit 1923096adb
5 changed files with 58 additions and 6 deletions
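For orientation, here is a minimal sketch of how the newly reconfigurable key can be refreshed at runtime. It is illustrative only: the class name is made up, the 180-second value is arbitrary, and the protected reconfigurePropertyImpl call assumes the caller sits in the org.apache.hadoop.hdfs.server.namenode package, as the test changes below do.

package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;

// Illustrative sketch: refresh the pending-reconstruction timeout on a
// running NameNode without restarting it.
public class ReconstructionTimeoutRefreshSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      cluster.waitActive();
      NameNode nn = cluster.getNameNode();
      BlockManager bm = nn.getNamesystem().getBlockManager();

      // Refresh the key added by this commit; the value is in seconds.
      nn.reconfigurePropertyImpl(
          DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY,
          "180");

      // The new BlockManager getter reflects the refreshed value.
      System.out.println(bm.getReconstructionPendingTimeout()); // prints 180
    } finally {
      cluster.shutdown();
    }
  }
}

On a live cluster the same refresh is normally driven through hdfs dfsadmin -reconfig namenode <host:ipc_port> start, which is why the key is added to the reconfigurable-properties set in the NameNode changes below.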

@@ -1067,6 +1067,26 @@ public void setBlocksReplWorkMultiplier(int newVal) {
blocksReplWorkMultiplier = newVal;
}
/**
* Updates the value used for pendingReconstruction timeout, which is set by
* {@code DFSConfigKeys.
* DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY} initially.
*
* @param newVal - Must be a positive non-zero integer.
*/
public void setReconstructionPendingTimeout(int newVal) {
ensurePositiveInt(newVal,
DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY);
pendingReconstruction.setTimeout(newVal * 1000L);
}
/** Returns the current setting for pendingReconstruction timeout, set by
* {@code DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY}.
*/
public int getReconstructionPendingTimeout() {
return (int)(pendingReconstruction.getTimeout() / 1000L);
}
public int getDefaultStorageNum(BlockInfo block) {
switch (block.getBlockType()) {
case STRIPED: return ((BlockInfoStriped) block).getRealTotalBlockNum();
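The new accessors translate between the second-granularity configuration value and the millisecond timeout held by PendingReconstructionBlocks: the setter validates positivity and multiplies by 1000, the getter divides by 1000. A standalone sketch of that round-trip, with stand-in names rather than the real BlockManager/PendingReconstructionBlocks types:

// Illustrative round-trip of the seconds <-> milliseconds conversion used by
// the new BlockManager accessors; `timeoutMs` stands in for the value held
// inside PendingReconstructionBlocks.
public class TimeoutConversionSketch {
  private long timeoutMs;                       // stored in milliseconds

  public void setReconstructionPendingTimeout(int seconds) {
    this.timeoutMs = seconds * 1000L;           // seconds -> milliseconds
  }

  public int getReconstructionPendingTimeout() {
    return (int) (this.timeoutMs / 1000L);      // milliseconds -> seconds
  }

  public static void main(String[] args) {
    TimeoutConversionSketch s = new TimeoutConversionSketch();
    s.setReconstructionPendingTimeout(300);
    System.out.println(s.getReconstructionPendingTimeout()); // prints 300
  }
}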

@@ -76,6 +76,14 @@ void start() {
timerThread.start();
}
public void setTimeout(long timeoutPeriod) {
this.timeout = timeoutPeriod;
}
public long getTimeout() {
return this.timeout;
}
/**
* Add a block to the list of pending reconstructions
* @param block The corresponding block
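PendingReconstructionBlocks only needs this plain setter/getter pair because its monitor thread consults the current timeout value on each scan, so a refreshed value takes effect on the next pass without restarting the thread. The general pattern, as an illustrative standalone sketch (this is not the actual PendingReconstructionBlocks code; the names, the volatile field, and the 5-second scan interval are stand-ins):

import java.util.concurrent.TimeUnit;

// Illustrative pattern only: a timeout that can be refreshed while a
// background monitor thread keeps running.
public class RefreshableTimeoutSketch {
  // volatile so a refreshed value is visible to the monitor thread
  private volatile long timeoutMs = 300_000L;

  public void setTimeout(long timeoutMs) { this.timeoutMs = timeoutMs; }
  public long getTimeout() { return timeoutMs; }

  /** Started once; re-reads the current timeout on every scan. */
  public Thread startMonitor() {
    Thread t = new Thread(() -> {
      while (!Thread.currentThread().isInterrupted()) {
        long current = getTimeout();   // picks up refreshed values
        // ... scan pending items and expire those older than `current` ...
        try {
          TimeUnit.SECONDS.sleep(5);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
      }
    }, "pending-monitor-sketch");
    t.setDaemon(true);
    t.start();
    return t;
  }
}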

@@ -203,6 +203,8 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT;
import static org.apache.hadoop.util.ExitUtil.terminate;
import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
@@ -350,7 +352,8 @@ public enum OperationCategory {
DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY,
DFS_BLOCK_INVALIDATE_LIMIT_KEY,
DFS_DATANODE_PEER_STATS_ENABLED_KEY,
DFS_DATANODE_MAX_NODES_TO_REPORT_KEY));
DFS_DATANODE_MAX_NODES_TO_REPORT_KEY,
DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY));
private static final String USAGE = "Usage: hdfs namenode ["
+ StartupOption.BACKUP.getName() + "] | \n\t["
@@ -2301,7 +2304,8 @@ protected String reconfigurePropertyImpl(String property, String newVal)
} else if (property.equals(DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY)
|| property.equals(DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY)
|| property.equals(
DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION)) {
DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION)
|| property.equals(DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY)) {
return reconfReplicationParameters(newVal, property);
} else if (property.equals(DFS_BLOCK_REPLICATOR_CLASSNAME_KEY) || property
.equals(DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY)) {
@@ -2347,6 +2351,14 @@ private String reconfReplicationParameters(final String newVal,
DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION_DEFAULT,
newVal));
newSetting = bm.getBlocksReplWorkMultiplier();
} else if (
property.equals(
DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY)) {
bm.setReconstructionPendingTimeout(
adjustNewVal(
DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT,
newVal));
newSetting = bm.getReconstructionPendingTimeout();
} else {
throw new IllegalArgumentException("Unexpected property " +
property + " in reconfReplicationParameters");

@@ -49,6 +49,9 @@ public void setup() throws IOException {
config.setInt(
DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,
12);
config.setInt(
DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY,
300);
cluster = new MiniDFSCluster.Builder(config)
.nnTopology(MiniDFSNNTopology.simpleSingleNN(0, 0))
@@ -72,6 +75,7 @@ public void testParamsCanBeReconfigured() throws ReconfigurationException {
assertEquals(8, bm.getMaxReplicationStreams());
assertEquals(10, bm.getReplicationStreamsHardLimit());
assertEquals(12, bm.getBlocksReplWorkMultiplier());
assertEquals(300, bm.getReconstructionPendingTimeout());
cluster.getNameNode().reconfigurePropertyImpl(
DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, "20");
@@ -81,10 +85,14 @@ public void testParamsCanBeReconfigured() throws ReconfigurationException {
cluster.getNameNode().reconfigurePropertyImpl(
DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,
"24");
cluster.getNameNode().reconfigurePropertyImpl(
DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY,
"180");
assertEquals(20, bm.getMaxReplicationStreams());
assertEquals(22, bm.getReplicationStreamsHardLimit());
assertEquals(24, bm.getBlocksReplWorkMultiplier());
assertEquals(180, bm.getReconstructionPendingTimeout());
}
/**
@@ -96,7 +104,8 @@ public void testReconfigureFailsWithInvalidValues() throws Exception {
String[] keys = new String[]{
DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY,
DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY,
DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION
DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,
DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY
};
// Ensure we cannot set any of the parameters negative
@@ -112,6 +121,7 @@ public void testReconfigureFailsWithInvalidValues() throws Exception {
assertEquals(8, bm.getMaxReplicationStreams());
assertEquals(10, bm.getReplicationStreamsHardLimit());
assertEquals(12, bm.getBlocksReplWorkMultiplier());
assertEquals(300, bm.getReconstructionPendingTimeout());
for (String key : keys) {
ReconfigurationException e =
@@ -126,6 +136,7 @@ public void testReconfigureFailsWithInvalidValues() throws Exception {
assertEquals(8, bm.getMaxReplicationStreams());
assertEquals(10, bm.getReplicationStreamsHardLimit());
assertEquals(12, bm.getBlocksReplWorkMultiplier());
assertEquals(300, bm.getReconstructionPendingTimeout());
// Ensure none of the parameters can be set to a string value
for (String key : keys) {
@@ -139,5 +150,6 @@ public void testReconfigureFailsWithInvalidValues() throws Exception {
assertEquals(8, bm.getMaxReplicationStreams());
assertEquals(10, bm.getReplicationStreamsHardLimit());
assertEquals(12, bm.getBlocksReplWorkMultiplier());
assertEquals(300, bm.getReconstructionPendingTimeout());
}
}
}

@@ -438,7 +438,7 @@ public void testNameNodeGetReconfigurableProperties() throws IOException, Interr
final List<String> outs = Lists.newArrayList();
final List<String> errs = Lists.newArrayList();
getReconfigurableProperties("namenode", address, outs, errs);
assertEquals(19, outs.size());
assertEquals(20, outs.size());
assertTrue(outs.get(0).contains("Reconfigurable properties:"));
assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY, outs.get(1));
assertEquals(DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY, outs.get(2));
@@ -1266,4 +1266,4 @@ public void testAllDatanodesReconfig()
outs.get(8));
}
}
}