HDFS-12082. BlockInvalidateLimit value is incorrectly set after namenode heartbeat interval reconfigured. Contributed by Weiwei Yang.
parent b19415f1a1
commit 8c63580981
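In short, the change makes the effective block-invalidate limit the larger of the value configured via dfs.block.invalidate.limit in hdfs-site.xml and 20 blocks per second of heartbeat interval, both at startup and after a runtime reconfiguration. A minimal standalone sketch of that rule follows; the class and method names are illustrative only and do not exist in Hadoop, and the defaults assumed are dfs.block.invalidate.limit = 1000 and dfs.heartbeat.interval = 3 seconds.

// Illustrative only: the rule this commit implements, pulled out into a
// tiny standalone helper (not part of the actual DatanodeManager code).
class BlockInvalidateLimitExample {
  // Effective limit = max(configured limit, 20 * heartbeat interval in seconds).
  static int effectiveLimit(int configuredLimit, long heartbeatIntervalSeconds) {
    return Math.max(20 * (int) heartbeatIntervalSeconds, configuredLimit);
  }

  public static void main(String[] args) {
    // With the defaults (configured = 1000, heartbeat interval = 3s):
    System.out.println(effectiveLimit(1000, 3));   // 1000, the configured value wins
    // With a 60-second heartbeat interval the counted limit wins:
    System.out.println(effectiveLimit(1000, 60));  // 1200
  }
}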
DatanodeManager.java
@@ -269,12 +269,19 @@ public class DatanodeManager {
         DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes
     this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval
         + 10 * 1000 * heartbeatIntervalSeconds;
-    final int blockInvalidateLimit = Math.max(20*(int)(heartbeatIntervalSeconds),
-        DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
-    this.blockInvalidateLimit = conf.getInt(
-        DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, blockInvalidateLimit);
+
+    // Effected block invalidate limit is the bigger value between
+    // value configured in hdfs-site.xml, and 20 * HB interval.
+    final int configuredBlockInvalidateLimit = conf.getInt(
+        DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY,
+        DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
+    final int countedBlockInvalidateLimit = 20*(int)(heartbeatIntervalSeconds);
+    this.blockInvalidateLimit = Math.max(countedBlockInvalidateLimit,
+        configuredBlockInvalidateLimit);
     LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY
-        + "=" + this.blockInvalidateLimit);
+        + ": configured=" + configuredBlockInvalidateLimit
+        + ", counted=" + countedBlockInvalidateLimit
+        + ", effected=" + blockInvalidateLimit);

     this.checkIpHostnameInRegistration = conf.getBoolean(
         DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY,
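As a worked example of the new constructor logic: a NameNode started with dfs.block.invalidate.limit=500 and the default 3-second heartbeat interval computes countedBlockInvalidateLimit = 20 * 3 = 60, so the configured 500 remains the effective limit, and the startup log line now reports the configured, counted, and effected values instead of only the final number.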
@@ -382,7 +389,8 @@ public class DatanodeManager {
     return fsClusterStats;
   }

-  int getBlockInvalidateLimit() {
+  @VisibleForTesting
+  public int getBlockInvalidateLimit() {
     return blockInvalidateLimit;
   }

@@ -1812,7 +1820,7 @@ public class DatanodeManager {
     this.heartbeatExpireInterval = 2L * recheckInterval + 10 * 1000
         * intervalSeconds;
     this.blockInvalidateLimit = Math.max(20 * (int) (intervalSeconds),
-        DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
+        blockInvalidateLimit);
   }

   /**
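This hunk is the actual bug fix. Before the change, a runtime reconfiguration of dfs.heartbeat.interval recomputed the limit as max(20 * interval, DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT), so a smaller value configured in hdfs-site.xml (for example 500) was silently replaced by at least the hard-coded default of 1000. After the change, the counted value is compared against the current blockInvalidateLimit field, so the configured value survives a reconfiguration unless 20 * interval exceeds it.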
TestNameNodeReconfigure.java
@@ -40,6 +40,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_BACKOFF_ENABLE_DEFAULT;

 public class TestNameNodeReconfigure {
@@ -48,10 +49,13 @@ public class TestNameNodeReconfigure {
       .getLog(TestNameNodeReconfigure.class);

   private MiniDFSCluster cluster;
+  private final int customizedBlockInvalidateLimit = 500;

   @Before
   public void setUp() throws IOException {
     Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFS_BLOCK_INVALIDATE_LIMIT_KEY,
+        customizedBlockInvalidateLimit);
     cluster = new MiniDFSCluster.Builder(conf).build();
     cluster.waitActive();
   }
@@ -201,6 +205,38 @@ public class TestNameNodeReconfigure {
         datanodeManager.getHeartbeatRecheckInterval());
   }

+  @Test
+  public void testBlockInvalidateLimitAfterReconfigured()
+      throws ReconfigurationException {
+    final NameNode nameNode = cluster.getNameNode();
+    final DatanodeManager datanodeManager = nameNode.namesystem
+        .getBlockManager().getDatanodeManager();
+
+    assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY + " is not correctly set",
+        customizedBlockInvalidateLimit,
+        datanodeManager.getBlockInvalidateLimit());
+
+    nameNode.reconfigureProperty(DFS_HEARTBEAT_INTERVAL_KEY,
+        Integer.toString(6));
+
+    // 20 * 6 = 120 < 500
+    // Invalid block limit should stay same as before after reconfiguration.
+    assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY
+        + " is not honored after reconfiguration",
+        customizedBlockInvalidateLimit,
+        datanodeManager.getBlockInvalidateLimit());
+
+    nameNode.reconfigureProperty(DFS_HEARTBEAT_INTERVAL_KEY,
+        Integer.toString(50));
+
+    // 20 * 50 = 1000 > 500
+    // Invalid block limit should be reset to 1000
+    assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY
+        + " is not reconfigured correctly",
+        1000,
+        datanodeManager.getBlockInvalidateLimit());
+  }
+
   @After
   public void shutDown() throws IOException {
     if (cluster != null) {
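The new test case can be run on its own with Maven's Surefire test filter, e.g. mvn test -Dtest=TestNameNodeReconfigure from the hadoop-hdfs module; the exact module path is assumed here rather than taken from this commit.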