HDFS-7261. storageMap is accessed without synchronization in DatanodeDescriptor#updateHeartbeatState() (Brahma Reddy Battula via Colin P. McCabe)

Colin Patrick McCabe 2015-03-30 10:46:21 -07:00
parent 5358b83167
commit 1feb9569f3
2 changed files with 22 additions and 13 deletions
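The bug and the fix both come down to the standard rule for a plain HashMap shared across threads: every access, reads included, must hold the same lock the writers use. The sketch below illustrates that pattern in isolation; it is a minimal, hypothetical StorageTracker class written for this note, not code from DatanodeDescriptor, and its names are made up for illustration.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Minimal illustration of the locking pattern the patch applies: a shared
// HashMap guarded by synchronizing on the map object itself. The class and
// its members are hypothetical; only the pattern mirrors the HDFS change.
class StorageTracker {
  private final Map<String, String> storageMap = new HashMap<>();

  // Writers (e.g. a thread processing block reports) already take the lock.
  void addStorage(String id, String info) {
    synchronized (storageMap) {
      storageMap.put(id, info);
    }
  }

  // Before the fix: copying values() or calling size() without the lock can
  // race with a concurrent put/remove and observe an inconsistent map.
  Set<String> snapshotUnsafe() {
    return new HashSet<>(storageMap.values());
  }

  // After the fix: the read-side copy and size check take the same lock,
  // so they see a consistent view of the map.
  Set<String> snapshotSafe() {
    synchronized (storageMap) {
      return new HashSet<>(storageMap.values());
    }
  }

  int sizeSafe() {
    synchronized (storageMap) {
      return storageMap.size();
    }
  }
}

A ConcurrentHashMap would be another way to get thread safety, but since the existing code already synchronizes its writers on storageMap (visible in the pruneStorageMap hunk below), extending that same lock to the heartbeat-path reads is the smaller, consistent change.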

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -379,6 +379,10 @@ Release 2.8.0 - UNRELEASED
     HDFS-8002. Website refers to /trash directory. (Brahma Reddy Battula via
     aajisaka)
 
+    HDFS-7261. storageMap is accessed without synchronization in
+    DatanodeDescriptor#updateHeartbeatState() (Brahma Reddy Battula via Colin
+    P. McCabe)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java

@@ -447,8 +447,10 @@ public void updateHeartbeatState(StorageReport[] reports, long cacheCapacity,
     if (checkFailedStorages) {
       LOG.info("Number of failed storage changes from "
           + this.volumeFailures + " to " + volFailures);
-      failedStorageInfos = new HashSet<DatanodeStorageInfo>(
-          storageMap.values());
+      synchronized (storageMap) {
+        failedStorageInfos =
+            new HashSet<DatanodeStorageInfo>(storageMap.values());
+      }
     }
 
     setCacheCapacity(cacheCapacity);
@@ -480,8 +482,11 @@ public void updateHeartbeatState(StorageReport[] reports, long cacheCapacity,
     if (checkFailedStorages) {
       updateFailedStorage(failedStorageInfos);
     }
-
-    if (storageMap.size() != reports.length) {
+    long storageMapSize;
+    synchronized (storageMap) {
+      storageMapSize = storageMap.size();
+    }
+    if (storageMapSize != reports.length) {
       pruneStorageMap(reports);
     }
   }
@@ -491,14 +496,14 @@ public void updateHeartbeatState(StorageReport[] reports, long cacheCapacity,
    * as long as they have associated block replicas.
    */
   private void pruneStorageMap(final StorageReport[] reports) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Number of storages reported in heartbeat=" + reports.length +
-          "; Number of storages in storageMap=" + storageMap.size());
-    }
-
-    HashMap<String, DatanodeStorageInfo> excessStorages;
-
     synchronized (storageMap) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Number of storages reported in heartbeat=" + reports.length
+            + "; Number of storages in storageMap=" + storageMap.size());
+      }
+
+      HashMap<String, DatanodeStorageInfo> excessStorages;
+
       // Init excessStorages with all known storages.
       excessStorages = new HashMap<String, DatanodeStorageInfo>(storageMap);
@@ -515,8 +520,8 @@ private void pruneStorageMap(final StorageReport[] reports) {
           LOG.info("Removed storage " + storageInfo + " from DataNode" + this);
         } else if (LOG.isDebugEnabled()) {
           // This can occur until all block reports are received.
-          LOG.debug("Deferring removal of stale storage " + storageInfo +
-              " with " + storageInfo.numBlocks() + " blocks");
+          LOG.debug("Deferring removal of stale storage " + storageInfo
+              + " with " + storageInfo.numBlocks() + " blocks");
         }
       }
     }