HDFS-14042. Fix NPE when PROVIDED storage is missing. Contributed by Virajith Jalaparti.

This commit is contained in:
Giovanni Matteo Fumarola 2018-11-05 11:02:31 -08:00
parent b732ed3553
commit a1321d020a
4 changed files with 20 additions and 4 deletions

View File

@ -2430,7 +2430,7 @@ public class BlockManager implements BlockStatsMXBean {
return providedStorageMap.getCapacity();
}
- public void updateHeartbeat(DatanodeDescriptor node, StorageReport[] reports,
+ void updateHeartbeat(DatanodeDescriptor node, StorageReport[] reports,
long cacheCapacity, long cacheUsed, int xceiverCount, int failedVolumes,
VolumeFailureSummary volumeFailureSummary) {
@ -2441,6 +2441,17 @@ public class BlockManager implements BlockStatsMXBean {
failedVolumes, volumeFailureSummary);
}
void updateHeartbeatState(DatanodeDescriptor node,
StorageReport[] reports, long cacheCapacity, long cacheUsed,
int xceiverCount, int failedVolumes,
VolumeFailureSummary volumeFailureSummary) {
for (StorageReport report: reports) {
providedStorageMap.updateStorage(node, report.getStorage());
}
node.updateHeartbeatState(reports, cacheCapacity, cacheUsed, xceiverCount,
failedVolumes, volumeFailureSummary);
}
/**
 * StatefulBlockInfo is used to build the "toUC" list, which is a list of
 * updates to the information about under-construction blocks.

View File

@ -373,7 +373,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
/**
 * Updates stats from datanode heartbeat.
 */
- public void updateHeartbeat(StorageReport[] reports, long cacheCapacity,
+ void updateHeartbeat(StorageReport[] reports, long cacheCapacity,
long cacheUsed, int xceiverCount, int volFailures,
VolumeFailureSummary volumeFailureSummary) {
updateHeartbeatState(reports, cacheCapacity, cacheUsed, xceiverCount,
@ -384,7 +384,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
/**
 * process datanode heartbeat or stats initialization.
 */
- public void updateHeartbeatState(StorageReport[] reports, long cacheCapacity,
+ void updateHeartbeatState(StorageReport[] reports, long cacheCapacity,
long cacheUsed, int xceiverCount, int volFailures,
VolumeFailureSummary volumeFailureSummary) {
updateStorageStats(reports, cacheCapacity, cacheUsed, xceiverCount,

View File

@ -251,7 +251,7 @@ class HeartbeatManager implements DatanodeStatistics {
// updateHeartbeat, because we don't want to modify the
// heartbeatedSinceRegistration flag. Arrival of a lifeline message does
// not count as arrival of the first heartbeat.
- node.updateHeartbeatState(reports, cacheCapacity, cacheUsed,
+ blockManager.updateHeartbeatState(node, reports, cacheCapacity, cacheUsed,
xceiverCount, failedVolumes, volumeFailureSummary);
stats.add(node);
}

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.datanode;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_LIFELINE_INTERVAL_SECONDS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
@ -196,6 +197,10 @@ public class TestDataNodeLifeline {
namesystem.getNumDeadDataNodes());
assertEquals("Expect DataNode not marked stale due to lifeline.", 0,
namesystem.getNumStaleDataNodes());
// add a new volume on the next heartbeat
cluster.getDataNodes().get(0).reconfigurePropertyImpl(
DFS_DATANODE_DATA_DIR_KEY,
cluster.getDataDirectory().concat("/data-new"));
}
// Verify that we did in fact call the lifeline RPC.