HDFS-9566. Remove expensive 'BlocksMap#getStorages(Block b, final DatanodeStorage.State state)' method (Contributed by Daryn Sharp)

This commit is contained in:
Vinayakumar B 2016-02-01 13:24:05 +05:30
parent 2673cbaf55
commit e418bd1fb0
3 changed files with 15 additions and 24 deletions

View File

@@ -2655,6 +2655,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9682. Fix a typo "aplication" in HttpFS document.
     (Weiwei Yang via aajisaka)
+
+    HDFS-9566. Remove expensive 'BlocksMap#getStorages(Block b, final
+    DatanodeStorage.State state)' method (Daryn Sharp via vinayakumarb)

 Release 2.7.3 - UNRELEASED

   INCOMPATIBLE CHANGES

View File

@@ -1212,8 +1212,10 @@ public class BlockManager implements BlockStatsMXBean {
       return;
     }
     StringBuilder datanodes = new StringBuilder();
-    for(DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock,
-        State.NORMAL)) {
+    for (DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock)) {
+      if (storage.getState() != State.NORMAL) {
+        continue;
+      }
       final DatanodeDescriptor node = storage.getDatanodeDescriptor();
       final Block b = getBlockOnStorage(storedBlock, storage);
       if (b != null) {
@@ -3164,7 +3166,10 @@ public class BlockManager implements BlockStatsMXBean {
     Collection<DatanodeStorageInfo> nonExcess = new ArrayList<>();
     Collection<DatanodeDescriptor> corruptNodes = corruptReplicas
         .getNodes(block);
-    for(DatanodeStorageInfo storage : blocksMap.getStorages(block, State.NORMAL)) {
+    for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
+      if (storage.getState() != State.NORMAL) {
+        continue;
+      }
       final DatanodeDescriptor cur = storage.getDatanodeDescriptor();
       if (storage.areBlockContentsStale()) {
         LOG.trace("BLOCK* processOverReplicatedBlock: Postponing {}"
@@ -3665,7 +3670,10 @@ public class BlockManager implements BlockStatsMXBean {
     // else proceed with fast case
     int live = 0;
     Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(b);
-    for(DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) {
+    for (DatanodeStorageInfo storage : blocksMap.getStorages(b)) {
+      if (storage.getState() != State.NORMAL) {
+        continue;
+      }
       final DatanodeDescriptor node = storage.getDatanodeDescriptor();
       if ((nodesCorrupt == null) || (!nodesCorrupt.contains(node)))
         live++;

View File

@@ -21,13 +21,9 @@ import java.util.Iterator;

 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.GSet;
 import org.apache.hadoop.util.LightWeightGSet;

-import com.google.common.base.Predicate;
-import com.google.common.collect.Iterables;
-
 /**
  * This class maintains the map from a block to its metadata.
  * block's metadata currently includes blockCollection it belongs to and
@@ -149,22 +145,6 @@ class BlocksMap {
     return getStorages(blocks.get(b));
   }

-  /**
-   * Searches for the block in the BlocksMap and
-   * returns {@link Iterable} of the storages the block belongs to
-   * <i>that are of the given {@link DatanodeStorage.State state}</i>.
-   *
-   * @param state DatanodeStorage state by which to filter the returned Iterable
-   */
-  Iterable<DatanodeStorageInfo> getStorages(Block b, final DatanodeStorage.State state) {
-    return Iterables.filter(getStorages(blocks.get(b)), new Predicate<DatanodeStorageInfo>() {
-      @Override
-      public boolean apply(DatanodeStorageInfo storage) {
-        return storage.getState() == state;
-      }
-    });
-  }
-
   /**
    * For a block that has already been retrieved from the BlocksMap
    * returns {@link Iterable} of the storages the block belongs to.