HDFS-11681. DatanodeStorageInfo#getBlockIterator() should return an iterator to an unmodifiable set. Contributed by Virajith Jalaparti.

commit 39e6331772
parent aa6f3238d6
@@ -1409,12 +1409,15 @@ public class BlockManager implements BlockStatsMXBean {
   void removeBlocksAssociatedTo(final DatanodeDescriptor node) {
     for (DatanodeStorageInfo storage : node.getStorageInfos()) {
       final Iterator<BlockInfo> it = storage.getBlockIterator();
+      //add the BlockInfos to a new collection as the
+      //returned iterator is not modifiable.
+      Collection<BlockInfo> toRemove = new ArrayList<>();
       while (it.hasNext()) {
-        BlockInfo block = it.next();
-        // DatanodeStorageInfo must be removed using the iterator to avoid
-        // ConcurrentModificationException in the underlying storage
-        it.remove();
-        removeStoredBlock(block, node);
+        toRemove.add(it.next());
+      }
+
+      for (BlockInfo b : toRemove) {
+        removeStoredBlock(b, node);
       }
     }
     // Remove all pending DN messages referencing this DN.
@@ -1429,11 +1432,11 @@ public class BlockManager implements BlockStatsMXBean {
     assert namesystem.hasWriteLock();
     final Iterator<BlockInfo> it = storageInfo.getBlockIterator();
     DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
-    while(it.hasNext()) {
-      BlockInfo block = it.next();
-      // DatanodeStorageInfo must be removed using the iterator to avoid
-      // ConcurrentModificationException in the underlying storage
-      it.remove();
+    Collection<BlockInfo> toRemove = new ArrayList<>();
+    while (it.hasNext()) {
+      toRemove.add(it.next());
+    }
+    for (BlockInfo block : toRemove) {
       removeStoredBlock(block, node);
       final Block b = getBlockOnStorage(block, storageInfo);
       if (b != null) {
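Both BlockManager hunks above apply the same collect-then-delete rewrite: because the iterator returned by getBlockIterator() now refuses remove(), the caller first drains it into a temporary ArrayList and only then mutates state via removeStoredBlock(). Below is a minimal self-contained sketch of that pattern; the HashSet of strings stands in for the storage's block set, and the class and variable names are illustrative, not Hadoop API.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;

public class CollectThenRemoveSketch {
  public static void main(String[] args) {
    Set<String> blocks = new HashSet<>(Arrays.asList("blk_1", "blk_2", "blk_3"));

    // Iterating over an unmodifiable view: calling remove() on this
    // iterator would throw UnsupportedOperationException.
    Iterator<String> it = Collections.unmodifiableSet(blocks).iterator();

    // Drain the iterator into a temporary collection first...
    Collection<String> toRemove = new ArrayList<>();
    while (it.hasNext()) {
      toRemove.add(it.next());
    }
    // ...then mutate the backing set directly, outside the iteration.
    for (String b : toRemove) {
      blocks.remove(b); // stands in for removeStoredBlock(b, node)
    }
    System.out.println(blocks.isEmpty()); // prints: true
  }
}

The temporary list costs O(n) extra memory per storage, but it cleanly separates read-only iteration from mutation, which is exactly the discipline the unmodifiable view is meant to enforce.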
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
@@ -270,8 +271,12 @@ public class DatanodeStorageInfo {
     return blocks.size();
   }
 
+  /**
+   * @return iterator to an unmodifiable set of blocks
+   * related to this {@link DatanodeStorageInfo}
+   */
   Iterator<BlockInfo> getBlockIterator() {
-    return blocks.iterator();
+    return Collections.unmodifiableSet(blocks).iterator();
   }
 
   void updateState(StorageReport r) {
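Collections.unmodifiableSet() wraps the live set rather than copying it, so the returned iterator is cheap to create and still observes the current contents; only mutation through the view, including Iterator.remove(), is rejected. A short sketch of that behavior, independent of the patch:

import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;

public class UnmodifiableViewSketch {
  public static void main(String[] args) {
    Set<Integer> backing = new HashSet<>();
    backing.add(1);

    Set<Integer> view = Collections.unmodifiableSet(backing);
    backing.add(2);                  // writes to the backing set show through
    System.out.println(view.size()); // prints: 2

    Iterator<Integer> it = view.iterator();
    it.next();
    try {
      it.remove();                   // the wrapper forbids structural changes
    } catch (UnsupportedOperationException e) {
      System.out.println("remove() rejected, as the new test below expects");
    }
  }
}

Note that the wrapper does not make iteration safe against concurrent writers; callers of getBlockIterator() still rely on the namesystem write lock, as the "assert namesystem.hasWriteLock()" context line in the BlockManager hunk shows.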
@@ -307,6 +307,13 @@ public class TestGetBlocks {
       Iterator<BlockInfo> storageBlockIt =
           BlockManagerTestUtil.getBlockIterator(s);
       while(storageBlockIt.hasNext()) {
         allBlocks[idx++] = storageBlockIt.next();
+        try {
+          storageBlockIt.remove();
+          assertTrue(
+              "BlockInfo iterator should have been unmodifiable", false);
+        } catch (UnsupportedOperationException e) {
+          //expected exception
+        }
       }
     }
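As a stylistic aside, the test's assertTrue("...", false) is a roundabout way to force a failure; JUnit's org.junit.Assert.fail() expresses the same intent directly. An equivalent standalone formulation (not part of the committed patch; the class and method names here are illustrative):

import static org.junit.Assert.fail;

import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import org.junit.Test;

public class UnmodifiableIteratorSketchTest {
  @Test
  public void removeOnUnmodifiableIteratorFails() {
    Iterator<String> it = Collections.unmodifiableSet(
        new HashSet<>(Collections.singleton("blk_1"))).iterator();
    it.next();
    try {
      it.remove();
      fail("iterator should have been unmodifiable");
    } catch (UnsupportedOperationException e) {
      // expected: remove() is unsupported on the unmodifiable view
    }
  }
}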