HDFS-7261. storageMap is accessed without synchronization in DatanodeDescriptor#updateHeartbeatState() (Brahma Reddy Battula via Colin P. McCabe)
parent 5358b83167
commit 1feb9569f3
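For context, a minimal sketch of the idiom this patch applies to storageMap: every read of a HashMap that other threads mutate, even a plain size() call or a values() copy, is performed while holding the map's own monitor, and iteration happens over a snapshot taken under that lock. The class and member names below are illustrative only, not code from this commit.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Illustrative only -- a hypothetical class, not part of this commit.
public class GuardedMapExample {
  // Like DatanodeDescriptor#storageMap, the map is guarded by
  // synchronizing on the map object itself.
  private final Map<String, String> storageMap = new HashMap<String, String>();

  public void addStorage(String id, String value) {
    synchronized (storageMap) {
      storageMap.put(id, value);
    }
  }

  public int storageCount() {
    // Even size() must be read under the lock; HashMap gives no
    // visibility guarantees to unsynchronized readers.
    synchronized (storageMap) {
      return storageMap.size();
    }
  }

  public Set<String> storageSnapshot() {
    // Copy the values under the lock, then let callers iterate the
    // snapshot without holding it.
    synchronized (storageMap) {
      return new HashSet<String>(storageMap.values());
    }
  }
}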
@@ -379,6 +379,10 @@ Release 2.8.0 - UNRELEASED
     HDFS-8002. Website refers to /trash directory. (Brahma Reddy Battula via
     aajisaka)
 
+    HDFS-7261. storageMap is accessed without synchronization in
+    DatanodeDescriptor#updateHeartbeatState() (Brahma Reddy Battula via Colin
+    P. McCabe)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -447,8 +447,10 @@ public class DatanodeDescriptor extends DatanodeInfo {
     if (checkFailedStorages) {
       LOG.info("Number of failed storage changes from "
           + this.volumeFailures + " to " + volFailures);
-      failedStorageInfos = new HashSet<DatanodeStorageInfo>(
-          storageMap.values());
+      synchronized (storageMap) {
+        failedStorageInfos =
+            new HashSet<DatanodeStorageInfo>(storageMap.values());
+      }
     }
 
     setCacheCapacity(cacheCapacity);
@@ -480,8 +482,11 @@ public class DatanodeDescriptor extends DatanodeInfo {
     if (checkFailedStorages) {
       updateFailedStorage(failedStorageInfos);
     }
-
-    if (storageMap.size() != reports.length) {
+    long storageMapSize;
+    synchronized (storageMap) {
+      storageMapSize = storageMap.size();
+    }
+    if (storageMapSize != reports.length) {
       pruneStorageMap(reports);
     }
   }
@@ -491,14 +496,14 @@ public class DatanodeDescriptor extends DatanodeInfo {
    * as long as they have associated block replicas.
    */
   private void pruneStorageMap(final StorageReport[] reports) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Number of storages reported in heartbeat=" + reports.length +
-          "; Number of storages in storageMap=" + storageMap.size());
-    }
-
-    HashMap<String, DatanodeStorageInfo> excessStorages;
-
     synchronized (storageMap) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Number of storages reported in heartbeat=" + reports.length
+            + "; Number of storages in storageMap=" + storageMap.size());
+      }
+
+      HashMap<String, DatanodeStorageInfo> excessStorages;
+
       // Init excessStorages with all known storages.
       excessStorages = new HashMap<String, DatanodeStorageInfo>(storageMap);
 
@@ -515,8 +520,8 @@ public class DatanodeDescriptor extends DatanodeInfo {
         LOG.info("Removed storage " + storageInfo + " from DataNode" + this);
       } else if (LOG.isDebugEnabled()) {
         // This can occur until all block reports are received.
-        LOG.debug("Deferring removal of stale storage " + storageInfo +
-            " with " + storageInfo.numBlocks() + " blocks");
+        LOG.debug("Deferring removal of stale storage " + storageInfo
+            + " with " + storageInfo.numBlocks() + " blocks");
       }
     }
   }