HDFS-7261. storageMap is accessed without synchronization in DatanodeDescriptor#updateHeartbeatState() (Brahma Reddy Battula via Colin P. McCabe)

Colin Patrick Mccabe 2015-03-30 10:46:21 -07:00
parent 5358b83167
commit 1feb9569f3
2 changed files with 22 additions and 13 deletions
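
For context: updateHeartbeatState() previously copied storageMap (a plain HashMap) and called storageMap.size() without holding the map's monitor, even though other code paths mutate the map under synchronized (storageMap). HashMap makes no guarantees to unsynchronized readers: a concurrent copy can observe a half-updated table or fail with ConcurrentModificationException. The toy program below reproduces the hazard; it is a sketch with illustrative names, not HDFS code.

import java.util.HashMap;
import java.util.Map;

public class UnsyncMapRace {
  // Stand-in for DatanodeDescriptor#storageMap, guarded by its own monitor.
  private static final Map<String, Integer> storageMap = new HashMap<>();

  public static void main(String[] args) throws InterruptedException {
    // The writer is disciplined: every mutation holds the map's monitor.
    Thread writer = new Thread(() -> {
      for (int i = 0; i < 10_000_000; i++) {
        synchronized (storageMap) {
          storageMap.put("s" + (i % 16), i);
          storageMap.remove("s" + ((i + 8) % 16));
        }
      }
    });
    writer.start();
    // The reader mirrors the pre-patch bug: it copies the map with no lock
    // held. The copy constructor iterates the map, so it may observe a
    // mid-update structure and throw ConcurrentModificationException.
    while (writer.isAlive()) {
      try {
        new HashMap<String, Integer>(storageMap);
      } catch (java.util.ConcurrentModificationException e) {
        System.out.println("Race observed: " + e);
        break;
      }
    }
    writer.join();
  }
}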

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -379,6 +379,10 @@ Release 2.8.0 - UNRELEASED
     HDFS-8002. Website refers to /trash directory. (Brahma Reddy Battula via
     aajisaka)
 
+    HDFS-7261. storageMap is accessed without synchronization in
+    DatanodeDescriptor#updateHeartbeatState() (Brahma Reddy Battula via Colin
+    P. McCabe)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java

@@ -447,8 +447,10 @@ public class DatanodeDescriptor extends DatanodeInfo {
     if (checkFailedStorages) {
       LOG.info("Number of failed storage changes from "
           + this.volumeFailures + " to " + volFailures);
-      failedStorageInfos = new HashSet<DatanodeStorageInfo>(
-          storageMap.values());
+      synchronized (storageMap) {
+        failedStorageInfos =
+            new HashSet<DatanodeStorageInfo>(storageMap.values());
+      }
     }
 
     setCacheCapacity(cacheCapacity);
@@ -480,8 +482,11 @@ public class DatanodeDescriptor extends DatanodeInfo {
     if (checkFailedStorages) {
       updateFailedStorage(failedStorageInfos);
     }
-
-    if (storageMap.size() != reports.length) {
+    long storageMapSize;
+    synchronized (storageMap) {
+      storageMapSize = storageMap.size();
+    }
+    if (storageMapSize != reports.length) {
       pruneStorageMap(reports);
     }
   }
@@ -491,14 +496,14 @@ public class DatanodeDescriptor extends DatanodeInfo {
    * as long as they have associated block replicas.
    */
   private void pruneStorageMap(final StorageReport[] reports) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Number of storages reported in heartbeat=" + reports.length +
-          "; Number of storages in storageMap=" + storageMap.size());
-    }
+    synchronized (storageMap) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Number of storages reported in heartbeat=" + reports.length
+            + "; Number of storages in storageMap=" + storageMap.size());
+      }
 
-    HashMap<String, DatanodeStorageInfo> excessStorages;
+      HashMap<String, DatanodeStorageInfo> excessStorages;
 
-    synchronized (storageMap) {
       // Init excessStorages with all known storages.
       excessStorages = new HashMap<String, DatanodeStorageInfo>(storageMap);
 
@@ -515,8 +520,8 @@ public class DatanodeDescriptor extends DatanodeInfo {
           LOG.info("Removed storage " + storageInfo + " from DataNode" + this);
         } else if (LOG.isDebugEnabled()) {
           // This can occur until all block reports are received.
-          LOG.debug("Deferring removal of stale storage " + storageInfo +
-              " with " + storageInfo.numBlocks() + " blocks");
+          LOG.debug("Deferring removal of stale storage " + storageInfo
+              + " with " + storageInfo.numBlocks() + " blocks");
         }
       }
     }
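
The common thread in the patch: every read of storageMap, including the implicit iteration done by the HashMap and HashSet copy constructors, now happens inside synchronized (storageMap), the snapshots (not the live map) are used for the slow work outside the lock, and a read-modify-write pass like pruneStorageMap() holds the monitor for its whole duration. Below is a condensed, self-contained sketch of that discipline; the types and method shapes are simplified stand-ins for DatanodeStorageInfo and StorageReport, not the real HDFS signatures.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class GuardedMapSnapshot {
  // Stand-in for storageMap, guarded by its own monitor.
  private final Map<String, Long> storageMap = new HashMap<>();

  void updateHeartbeatState(String[] reports) {
    Set<Long> failedCandidates;
    synchronized (storageMap) {
      // Snapshot under the lock: the copy constructor iterates values().
      failedCandidates = new HashSet<>(storageMap.values());
    }
    // Slow work (logging, failure accounting) runs on the snapshot and
    // never touches the live map, so writers are not blocked.
    System.out.println("failure candidates: " + failedCandidates.size());

    int storageMapSize;
    synchronized (storageMap) {
      storageMapSize = storageMap.size();  // even size() needs the lock
    }
    if (storageMapSize != reports.length) {
      pruneStorageMap(reports);
    }
  }

  private void pruneStorageMap(String[] reports) {
    // Read-modify-write: hold the monitor across the whole pass so the map
    // cannot change between computing the excess set and removing it.
    synchronized (storageMap) {
      Map<String, Long> excess = new HashMap<>(storageMap);
      for (String reportedId : reports) {
        excess.remove(reportedId);
      }
      for (String staleId : excess.keySet()) {
        storageMap.remove(staleId);
      }
    }
  }
}

Keeping each critical section to a quick copy or size() call is presumably why the patch snapshots under the lock rather than holding the monitor across the logging and failure accounting, while the prune pass must stay fully locked because it both reads and mutates the map.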