HDFS-12082. BlockInvalidateLimit value is incorrectly set after namenode heartbeat interval reconfigured. Contributed by Weiwei Yang.

Arpit Agarwal 2017-07-31 11:33:55 -07:00
parent b19415f1a1
commit 8c63580981
2 changed files with 50 additions and 6 deletions
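Why the change: DatanodeManager computes the block invalidate limit at startup as the larger of the value configured via dfs.block.invalidate.limit and 20 times the heartbeat interval in seconds, but the runtime reconfiguration path recomputed the limit against the compile-time default instead, silently discarding the configured value. The patch applies the same selection rule on both paths. A minimal sketch of that rule (illustrative only; the helper name is hypothetical, not part of the commit):

    // Selection rule the patch enforces on both the startup and
    // reconfiguration paths (hypothetical standalone helper).
    static int effectiveBlockInvalidateLimit(int configuredLimit,
        long heartbeatIntervalSeconds) {
      // Effective limit is the larger of the configured value and
      // 20 * heartbeat interval (in seconds).
      return Math.max(20 * (int) heartbeatIntervalSeconds, configuredLimit);
    }

    // With dfs.block.invalidate.limit = 500, as in the test below:
    //   interval  6s -> max(120,  500) = 500   (configured value honored)
    //   interval 50s -> max(1000, 500) = 1000  (counted value wins)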

DatanodeManager.java

@@ -269,12 +269,19 @@ public class DatanodeManager {
         DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes
     this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval
         + 10 * 1000 * heartbeatIntervalSeconds;
-    final int blockInvalidateLimit = Math.max(20*(int)(heartbeatIntervalSeconds),
-        DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
-    this.blockInvalidateLimit = conf.getInt(
-        DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, blockInvalidateLimit);
+    // Effected block invalidate limit is the bigger value between
+    // value configured in hdfs-site.xml, and 20 * HB interval.
+    final int configuredBlockInvalidateLimit = conf.getInt(
+        DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY,
+        DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
+    final int countedBlockInvalidateLimit = 20*(int)(heartbeatIntervalSeconds);
+    this.blockInvalidateLimit = Math.max(countedBlockInvalidateLimit,
+        configuredBlockInvalidateLimit);
     LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY
-        + "=" + this.blockInvalidateLimit);
+        + ": configured=" + configuredBlockInvalidateLimit
+        + ", counted=" + countedBlockInvalidateLimit
+        + ", effected=" + blockInvalidateLimit);
 
     this.checkIpHostnameInRegistration = conf.getBoolean(
         DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY,
@@ -382,7 +389,8 @@ public class DatanodeManager {
     return fsClusterStats;
   }
 
-  int getBlockInvalidateLimit() {
+  @VisibleForTesting
+  public int getBlockInvalidateLimit() {
     return blockInvalidateLimit;
   }
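The accessor gains @VisibleForTesting and public visibility so that the new test below can read the effective limit directly from DatanodeManager.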
@@ -1812,7 +1820,7 @@ public class DatanodeManager {
     this.heartbeatExpireInterval = 2L * recheckInterval + 10 * 1000
         * intervalSeconds;
     this.blockInvalidateLimit = Math.max(20 * (int) (intervalSeconds),
-        DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
+        blockInvalidateLimit);
   }
 
   /**
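Note that the reconfiguration path now floors the recomputed limit at the current blockInvalidateLimit rather than at DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT, so a value from hdfs-site.xml survives heartbeat-interval changes. One side effect worth knowing (worked trace, assuming dfs.block.invalidate.limit = 500 and the stock 3-second default heartbeat interval): because each previous effective value becomes the next floor, the limit never decreases across successive reconfigurations.

    // startup:               limit = max(20*3,  500)  = 500
    // reconfig interval=6:   limit = max(20*6,  500)  = 500
    // reconfig interval=50:  limit = max(20*50, 500)  = 1000
    // reconfig interval=6:   limit = max(20*6,  1000) = 1000  (no shrink)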
TestNameNodeReconfigure.java

@@ -40,6 +40,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_BACKOFF_ENABLE_DEFAULT;
 
 public class TestNameNodeReconfigure {
@@ -48,10 +49,13 @@ public class TestNameNodeReconfigure {
       .getLog(TestNameNodeReconfigure.class);
 
   private MiniDFSCluster cluster;
+  private final int customizedBlockInvalidateLimit = 500;
 
   @Before
   public void setUp() throws IOException {
     Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFS_BLOCK_INVALIDATE_LIMIT_KEY,
+        customizedBlockInvalidateLimit);
     cluster = new MiniDFSCluster.Builder(conf).build();
     cluster.waitActive();
   }
@@ -201,6 +205,38 @@ public class TestNameNodeReconfigure {
       datanodeManager.getHeartbeatRecheckInterval());
   }
 
+  @Test
+  public void testBlockInvalidateLimitAfterReconfigured()
+      throws ReconfigurationException {
+    final NameNode nameNode = cluster.getNameNode();
+    final DatanodeManager datanodeManager = nameNode.namesystem
+        .getBlockManager().getDatanodeManager();
+
+    assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY + " is not correctly set",
+        customizedBlockInvalidateLimit,
+        datanodeManager.getBlockInvalidateLimit());
+
+    nameNode.reconfigureProperty(DFS_HEARTBEAT_INTERVAL_KEY,
+        Integer.toString(6));
+
+    // 20 * 6 = 120 < 500
+    // Invalid block limit should stay same as before after reconfiguration.
+    assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY
+        + " is not honored after reconfiguration",
+        customizedBlockInvalidateLimit,
+        datanodeManager.getBlockInvalidateLimit());
+
+    nameNode.reconfigureProperty(DFS_HEARTBEAT_INTERVAL_KEY,
+        Integer.toString(50));
+
+    // 20 * 50 = 1000 > 500
+    // Invalid block limit should be reset to 1000
+    assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY
+        + " is not reconfigured correctly",
+        1000,
+        datanodeManager.getBlockInvalidateLimit());
+  }
+
   @After
   public void shutDown() throws IOException {
     if (cluster != null) {
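The test drives reconfiguration in-process via NameNode#reconfigureProperty. On a live cluster the equivalent flow is to update dfs.heartbeat.interval in hdfs-site.xml and trigger a reconfiguration through dfsadmin, roughly as follows (exact option support varies by Hadoop release; host and port are placeholders):

    hdfs dfsadmin -reconfig namenode <nn_host:ipc_port> start
    hdfs dfsadmin -reconfig namenode <nn_host:ipc_port> status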