diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e40ea3d427b..7294cabd53a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -701,8 +701,6 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8709. Clarify automatic sync in FSEditLog#logEdit. (wang)
 
-    HDFS-8652. Track BlockInfo instead of Block in CorruptReplicasMap. (jing9)
-
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 4df2f0ea8c5..5ad992b5c72 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -179,7 +179,7 @@ public abstract class BlockInfo extends Block
    * information indicating the index of the block in the
    * corresponding block group.
    */
-  abstract void addStorage(DatanodeStorageInfo storage, Block reportedBlock);
+  abstract boolean addStorage(DatanodeStorageInfo storage, Block reportedBlock);
 
   /**
    * Remove {@link DatanodeStorageInfo} location for a block
@@ -192,11 +192,6 @@ public abstract class BlockInfo extends Block
    */
   abstract void replaceBlock(BlockInfo newBlock);
 
-  /**
-   * @return true if there is no storage storing the block
-   */
-  abstract boolean hasEmptyStorage();
-
   /**
    * Find specified DatanodeStorageInfo.
    * @return DatanodeStorageInfo or null if not found.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
index 561facab696..de64ad84b86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -45,8 +45,8 @@ public class BlockInfoContiguous extends BlockInfo {
   }
 
   @Override
-  void addStorage(DatanodeStorageInfo storage, Block reportedBlock) {
-    ContiguousBlockStorageOp.addStorage(this, storage);
+  boolean addStorage(DatanodeStorageInfo storage, Block reportedBlock) {
+    return ContiguousBlockStorageOp.addStorage(this, storage);
   }
 
   @Override
@@ -73,9 +73,4 @@ public class BlockInfoContiguous extends BlockInfo {
     ucBlock.setBlockCollection(getBlockCollection());
     return ucBlock;
   }
-
-  @Override
-  boolean hasEmptyStorage() {
-    return ContiguousBlockStorageOp.hasEmptyStorage(this);
-  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
index 7924709c2b6..9cd3987a5cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
@@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
@@ -273,17 +274,18 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo {
           "No blocks found, lease removed.");
     }
     boolean allLiveReplicasTriedAsPrimary = true;
-    for (ReplicaUnderConstruction replica : replicas) {
+    for (int i = 0; i < replicas.size(); i++) {
       // Check if all replicas have been tried or not.
-      if (replica.isAlive()) {
-        allLiveReplicasTriedAsPrimary = allLiveReplicasTriedAsPrimary
-            && replica.getChosenAsPrimary();
+      if (replicas.get(i).isAlive()) {
+        allLiveReplicasTriedAsPrimary =
+            (allLiveReplicasTriedAsPrimary &&
+                replicas.get(i).getChosenAsPrimary());
       }
     }
     if (allLiveReplicasTriedAsPrimary) {
       // Just set all the replicas to be chosen whether they are alive or not.
-      for (ReplicaUnderConstruction replica : replicas) {
-        replica.setChosenAsPrimary(false);
+      for (int i = 0; i < replicas.size(); i++) {
+        replicas.get(i).setChosenAsPrimary(false);
       }
     }
     long mostRecentLastUpdate = 0;
@@ -343,6 +345,10 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo {
    * Convert an under construction block to a complete block.
    *
    * @return a complete block.
+   * @throws IOException
+   *           if the state of the block (the generation stamp and the length)
+   *           has not been committed by the client or it does not have at
+   *           least a minimal number of replicas reported from data-nodes.
    */
   public abstract BlockInfo convertToCompleteBlock();
 
@@ -380,8 +386,8 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo {
   }
 
   private void appendUCParts(StringBuilder sb) {
-    sb.append("{UCState=").append(blockUCState).append(", truncateBlock=")
-        .append(truncateBlock)
+    sb.append("{UCState=").append(blockUCState)
+        .append(", truncateBlock=" + truncateBlock)
         .append(", primaryNodeIndex=").append(primaryNodeIndex)
         .append(", replicas=[");
     if (replicas != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java
index 963f247c55c..d3cb337b121 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java
@@ -55,6 +55,10 @@ public class BlockInfoUnderConstructionContiguous extends
    * Convert an under construction block to a complete block.
    *
    * @return BlockInfo - a complete block.
+   * @throws IOException if the state of the block
+   * (the generation stamp and the length) has not been committed by
+   * the client or it does not have at least a minimal number of replicas
+   * reported from data-nodes.
    */
   @Override
   public BlockInfoContiguous convertToCompleteBlock() {
@@ -65,8 +69,8 @@ public class BlockInfoUnderConstructionContiguous extends
   }
 
   @Override
-  void addStorage(DatanodeStorageInfo storage, Block reportedBlock) {
-    ContiguousBlockStorageOp.addStorage(this, storage);
+  boolean addStorage(DatanodeStorageInfo storage, Block reportedBlock) {
+    return ContiguousBlockStorageOp.addStorage(this, storage);
   }
 
   @Override
@@ -84,11 +88,6 @@ public class BlockInfoUnderConstructionContiguous extends
     ContiguousBlockStorageOp.replaceBlock(this, newBlock);
   }
 
-  @Override
-  boolean hasEmptyStorage() {
-    return ContiguousBlockStorageOp.hasEmptyStorage(this);
-  }
-
   @Override
   public void setExpectedLocations(DatanodeStorageInfo[] targets) {
     int numLocations = targets == null ? 0 : targets.length;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 6ae3ee2daba..0b60a979ebd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 
 import java.io.IOException;
@@ -196,7 +197,7 @@ public class BlockManager implements BlockStatsMXBean {
    * notified of all block deletions that might have been pending
    * when the failover happened.
    */
-  private final Set<BlockInfo> postponedMisreplicatedBlocks = Sets.newHashSet();
+  private final Set<Block> postponedMisreplicatedBlocks = Sets.newHashSet();
 
   /**
    * Maps a StorageID to the set of blocks that are "extra" for this
@@ -337,7 +338,8 @@ public class BlockManager implements BlockStatsMXBean {
         DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY,
         DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT);
     this.shouldCheckForEnoughRacks =
-        conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) != null;
+        conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) == null
+            ? false : true;
     this.blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
     this.blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
 
@@ -463,7 +465,8 @@ public class BlockManager implements BlockStatsMXBean {
 
   /** Should the access keys be updated? */
   boolean shouldUpdateBlockKey(final long updateTime) throws IOException {
-    return isBlockTokenEnabled() && blockTokenSecretManager.updateKeys(updateTime);
+    return isBlockTokenEnabled()? blockTokenSecretManager.updateKeys(updateTime)
+        : false;
   }
 
   public void activate(Configuration conf) {
@@ -516,14 +519,14 @@ public class BlockManager implements BlockStatsMXBean {
     synchronized (neededReplications) {
       out.println("Metasave: Blocks waiting for replication: "
           + neededReplications.size());
-      for (BlockInfo block : neededReplications) {
+      for (Block block : neededReplications) {
         dumpBlockMeta(block, out);
       }
     }
 
     // Dump any postponed over-replicated blocks
     out.println("Mis-replicated blocks that have been postponed:");
-    for (BlockInfo block : postponedMisreplicatedBlocks) {
+    for (Block block : postponedMisreplicatedBlocks) {
       dumpBlockMeta(block, out);
     }
 
@@ -541,9 +544,11 @@ public class BlockManager implements BlockStatsMXBean {
    * Dump the metadata for the given block in a human-readable
    * form.
    */
-  private void dumpBlockMeta(BlockInfo block, PrintWriter out) {
-    List<DatanodeDescriptor> containingNodes = new ArrayList<>();
-    List<DatanodeStorageInfo> containingLiveReplicasNodes = new ArrayList<>();
+  private void dumpBlockMeta(Block block, PrintWriter out) {
+    List<DatanodeDescriptor> containingNodes =
+        new ArrayList<DatanodeDescriptor>();
+    List<DatanodeStorageInfo> containingLiveReplicasNodes =
+        new ArrayList<>();
     NumberReplicas numReplicas = new NumberReplicas();
     // source node returned is not used
@@ -551,16 +556,17 @@ public class BlockManager implements BlockStatsMXBean {
         containingLiveReplicasNodes, numReplicas,
         UnderReplicatedBlocks.LEVEL);
 
-    // containingLiveReplicasNodes can include READ_ONLY_SHARED replicas which
-    // are not included in the numReplicas.liveReplicas() count
+    // containingLiveReplicasNodes can include READ_ONLY_SHARED replicas which are
+    // not included in the numReplicas.liveReplicas() count
     assert containingLiveReplicasNodes.size() >= numReplicas.liveReplicas();
     int usableReplicas = numReplicas.liveReplicas() +
                          numReplicas.decommissionedAndDecommissioning();
-
-    BlockCollection bc = block.getBlockCollection();
-    String fileName = (bc == null) ? "[orphaned]" : bc.getName();
-    out.print(fileName + ": ");
-
+
+    if (block instanceof BlockInfo) {
+      BlockCollection bc = ((BlockInfo) block).getBlockCollection();
+      String fileName = (bc == null) ? "[orphaned]" : bc.getName();
+      out.print(fileName + ": ");
+    }
     // l: == live:, d: == decommissioned c: == corrupt e: == excess
     out.print(block + ((usableReplicas > 0)? "" : " MISSING") +
               " (replicas:" +
@@ -569,8 +575,8 @@ public class BlockManager implements BlockStatsMXBean {
               " c: " + numReplicas.corruptReplicas() +
               " e: " + numReplicas.excessReplicas() + ") ");
 
-    Collection<DatanodeDescriptor> corruptNodes =
-        corruptReplicas.getNodes(block);
+    Collection<DatanodeDescriptor> corruptNodes =
+                                  corruptReplicas.getNodes(block);
 
     for (DatanodeStorageInfo storage : getStorages(block)) {
       final DatanodeDescriptor node = storage.getDatanodeDescriptor();
@@ -807,8 +813,7 @@ public class BlockManager implements BlockStatsMXBean {
       final long offset, final long length, final int nrBlocksToReturn,
       final AccessMode mode) throws IOException {
     int curBlk;
-    long curPos = 0;
-    long blkSize;
+    long curPos = 0, blkSize = 0;
     int nrBlocks = (blocks[0].getNumBytes() == 0) ? 0 : blocks.length;
     for (curBlk = 0; curBlk < nrBlocks; curBlk++) {
       blkSize = blocks[curBlk].getNumBytes();
@@ -1199,11 +1204,10 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
-   * Mark a replica (of a contiguous block) or an internal block (of a striped
-   * block group) as corrupt.
-   * @param b Indicating the reported bad block and the corresponding BlockInfo
-   *          stored in blocksMap.
+   *
+   * @param b
    * @param storageInfo storage that contains the block, if known. null otherwise.
+   * @throws IOException
    */
   private void markBlockAsCorrupt(BlockToMarkCorrupt b,
       DatanodeStorageInfo storageInfo,
@@ -1224,7 +1228,7 @@ public class BlockManager implements BlockStatsMXBean {
     }
 
     // Add this replica to corruptReplicas Map
-    corruptReplicas.addToCorruptReplicasMap(b.stored, node, b.reason,
+    corruptReplicas.addToCorruptReplicasMap(b.corrupted, node, b.reason,
         b.reasonCode);
 
     NumberReplicas numberOfReplicas = countNodes(b.stored);
@@ -1246,7 +1250,7 @@ public class BlockManager implements BlockStatsMXBean {
     if (hasEnoughLiveReplicas || hasMoreCorruptReplicas
         || corruptedDuringWrite) {
       // the block is over-replicated so invalidate the replicas immediately
-      invalidateBlock(b, node, numberOfReplicas);
+      invalidateBlock(b, node);
     } else if (namesystem.isPopulatingReplQueues()) {
       // add the block to neededReplication
       updateNeededReplications(b.stored, -1, 0);
@@ -1254,15 +1258,12 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
-   * Invalidates the given block on the given datanode. Note that before this
-   * call we have already checked the current live replicas of the block and
-   * make sure it's safe to invalidate the replica.
-   *
-   * @return true if the replica was successfully invalidated and no longer
-   *         associated with the DataNode.
+   * Invalidates the given block on the given datanode.
+   * @return true if the block was successfully invalidated and no longer
+   *         present in the BlocksMap
    */
-  private boolean invalidateBlock(BlockToMarkCorrupt b, DatanodeInfo dn,
-      NumberReplicas nr) throws IOException {
+  private boolean invalidateBlock(BlockToMarkCorrupt b, DatanodeInfo dn
+      ) throws IOException {
     blockLog.info("BLOCK* invalidateBlock: {} on {}", b, dn);
     DatanodeDescriptor node = getDatanodeManager().getDatanode(dn);
     if (node == null) {
@@ -1271,30 +1272,35 @@ public class BlockManager implements BlockStatsMXBean {
     }
 
     // Check how many copies we have of the block
+    NumberReplicas nr = countNodes(b.stored);
     if (nr.replicasOnStaleNodes() > 0) {
       blockLog.info("BLOCK* invalidateBlocks: postponing " +
           "invalidation of {} on {} because {} replica(s) are located on " +
           "nodes with potentially out-of-date block reports", b, dn,
           nr.replicasOnStaleNodes());
-      postponeBlock(b.stored);
+      postponeBlock(b.corrupted);
       return false;
-    } else {
-      // we already checked the number of replicas in the caller of this
-      // function and we know there is at least one copy on a live node, so we
-      // can delete it.
+    } else if (nr.liveReplicas() >= 1) {
+      // If we have at least one copy on a live node, then we can delete it.
       addToInvalidates(b.corrupted, dn);
       removeStoredBlock(b.stored, node);
       blockLog.debug("BLOCK* invalidateBlocks: {} on {} listed for deletion.",
           b, dn);
       return true;
+    } else {
+      blockLog.info("BLOCK* invalidateBlocks: {} on {} is the only copy and" +
+          " was not deleted", b, dn);
+      return false;
     }
   }
 
+
   public void setPostponeBlocksFromFuture(boolean postpone) {
     this.shouldPostponeBlocksFromFuture = postpone;
   }
 
-  private void postponeBlock(BlockInfo blk) {
+
+  private void postponeBlock(Block blk) {
     if (postponedMisreplicatedBlocks.add(blk)) {
       postponedMisreplicatedBlocksCount.incrementAndGet();
     }
@@ -1368,7 +1374,7 @@ public class BlockManager implements BlockStatsMXBean {
     int requiredReplication, numEffectiveReplicas;
     List<DatanodeDescriptor> containingNodes;
     DatanodeDescriptor srcNode;
-    BlockCollection bc;
+    BlockCollection bc = null;
    int additionalReplRequired;
 
    int scheduledWork = 0;
@@ -1529,9 +1535,9 @@ public class BlockManager implements BlockStatsMXBean {
           DatanodeStorageInfo[] targets = rw.targets;
           if (targets != null && targets.length != 0) {
             StringBuilder targetList = new StringBuilder("datanode(s)");
-            for (DatanodeStorageInfo target : targets) {
+            for (int k = 0; k < targets.length; k++) {
               targetList.append(' ');
-              targetList.append(target.getDatanodeDescriptor());
+              targetList.append(targets[k].getDatanodeDescriptor());
             }
             blockLog.info("BLOCK* ask {} to replicate {} to {}", rw.srcNode,
                 rw.block, targetList);
@@ -1608,8 +1614,8 @@ public class BlockManager implements BlockStatsMXBean {
     List<DatanodeDescriptor> datanodeDescriptors = null;
     if (nodes != null) {
       datanodeDescriptors = new ArrayList<>(nodes.size());
-      for (String nodeStr : nodes) {
-        DatanodeDescriptor node = datanodeManager.getDatanodeDescriptor(nodeStr);
+      for (int i = 0; i < nodes.size(); i++) {
+        DatanodeDescriptor node = datanodeManager.getDatanodeDescriptor(nodes.get(i));
         if (node != null) {
           datanodeDescriptors.add(node);
         }
@@ -1648,7 +1654,7 @@ public class BlockManager implements BlockStatsMXBean {
    *         the given block
    */
   @VisibleForTesting
-  DatanodeDescriptor chooseSourceDatanode(BlockInfo block,
+  DatanodeDescriptor chooseSourceDatanode(Block block,
       List<DatanodeDescriptor> containingNodes,
       List<DatanodeStorageInfo> nodesContainingLiveReplicas,
       NumberReplicas numReplicas,
@@ -1728,16 +1734,16 @@ public class BlockManager implements BlockStatsMXBean {
     if (timedOutItems != null) {
       namesystem.writeLock();
       try {
-        for (BlockInfo timedOutItem : timedOutItems) {
+        for (int i = 0; i < timedOutItems.length; i++) {
           /*
            * Use the blockinfo from the blocksmap to be certain we're working
            * with the most up-to-date block information (e.g. genstamp).
           */
-          BlockInfo bi = getStoredBlock(timedOutItem);
+          BlockInfo bi = getStoredBlock(timedOutItems[i]);
          if (bi == null) {
            continue;
          }
-          NumberReplicas num = countNodes(timedOutItem);
+          NumberReplicas num = countNodes(timedOutItems[i]);
          if (isNeededReplication(bi, getReplication(bi),
              num.liveReplicas())) {
            neededReplications.add(bi, num.liveReplicas(),
                num.decommissionedAndDecommissioning(), getReplication(bi));
@@ -1754,7 +1760,7 @@ public class BlockManager implements BlockStatsMXBean {
 
   public long requestBlockReportLeaseId(DatanodeRegistration nodeReg) {
     assert namesystem.hasReadLock();
-    DatanodeDescriptor node;
+    DatanodeDescriptor node = null;
     try {
       node = datanodeManager.getDatanode(nodeReg);
     } catch (UnregisteredNodeException e) {
@@ -2016,7 +2022,7 @@ public class BlockManager implements BlockStatsMXBean {
         startIndex += (base+1);
       }
     }
-    Iterator<BlockInfo> it = postponedMisreplicatedBlocks.iterator();
+    Iterator<Block> it = postponedMisreplicatedBlocks.iterator();
     for (int tmp = 0; tmp < startIndex; tmp++) {
       it.next();
     }
@@ -2111,7 +2117,7 @@ public class BlockManager implements BlockStatsMXBean {
       long oldGenerationStamp, long oldNumBytes,
       DatanodeStorageInfo[] newStorages) throws IOException {
     assert namesystem.hasWriteLock();
-    BlockToMarkCorrupt b;
+    BlockToMarkCorrupt b = null;
     if (block.getGenerationStamp() != oldGenerationStamp) {
       b = new BlockToMarkCorrupt(oldBlock, block, oldGenerationStamp,
           "genstamp does not match " + oldGenerationStamp
@@ -2713,7 +2719,7 @@ public class BlockManager implements BlockStatsMXBean {
           " but corrupt replicas map has " + corruptReplicasCount);
     }
     if ((corruptReplicasCount > 0) && (numLiveReplicas >= fileReplication)) {
-      invalidateCorruptReplicas(storedBlock, reportedBlock, num);
+      invalidateCorruptReplicas(storedBlock, reportedBlock);
     }
     return storedBlock;
   }
@@ -2746,20 +2752,18 @@ public class BlockManager implements BlockStatsMXBean {
    *
    * @param blk Block whose corrupt replicas need to be invalidated
    */
-  private void invalidateCorruptReplicas(BlockInfo blk, Block reported,
-      NumberReplicas numberReplicas) {
+  private void invalidateCorruptReplicas(BlockInfo blk, Block reported) {
     Collection<DatanodeDescriptor> nodes = corruptReplicas.getNodes(blk);
     boolean removedFromBlocksMap = true;
     if (nodes == null)
       return;
     // make a copy of the array of nodes in order to avoid
     // ConcurrentModificationException, when the block is removed from the node
-    DatanodeDescriptor[] nodesCopy = nodes.toArray(
-        new DatanodeDescriptor[nodes.size()]);
+    DatanodeDescriptor[] nodesCopy = nodes.toArray(new DatanodeDescriptor[0]);
     for (DatanodeDescriptor node : nodesCopy) {
       try {
         if (!invalidateBlock(new BlockToMarkCorrupt(reported, blk, null,
-            Reason.ANY), node, numberReplicas)) {
+            Reason.ANY), node)) {
           removedFromBlocksMap = false;
         }
       } catch (IOException e) {
@@ -2809,6 +2813,7 @@ public class BlockManager implements BlockStatsMXBean {
         replicationQueuesInitializer.join();
       } catch (final InterruptedException e) {
Returning.."); + return; } finally { replicationQueuesInitializer = null; } @@ -3170,7 +3175,8 @@ public class BlockManager implements BlockStatsMXBean { CachedBlock cblock = namesystem.getCacheManager().getCachedBlocks() .get(new CachedBlock(storedBlock.getBlockId(), (short) 0, false)); if (cblock != null) { - boolean removed = node.getPendingCached().remove(cblock); + boolean removed = false; + removed |= node.getPendingCached().remove(cblock); removed |= node.getCached().remove(cblock); removed |= node.getPendingUncached().remove(cblock); if (removed) { @@ -3386,7 +3392,7 @@ public class BlockManager implements BlockStatsMXBean { int excess = 0; int stale = 0; Collection nodesCorrupt = corruptReplicas.getNodes(b); - for (DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) { + for(DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) { final DatanodeDescriptor node = storage.getDatanodeDescriptor(); if ((nodesCorrupt != null) && (nodesCorrupt.contains(node))) { corrupt++; @@ -3407,8 +3413,7 @@ public class BlockManager implements BlockStatsMXBean { stale++; } } - return new NumberReplicas(live, decommissioned, decommissioning, corrupt, - excess, stale); + return new NumberReplicas(live, decommissioned, decommissioning, corrupt, excess, stale); } /** @@ -3591,6 +3596,8 @@ public class BlockManager implements BlockStatsMXBean { String src, BlockInfo[] blocks) { for (BlockInfo b: blocks) { if (!b.isComplete()) { + final BlockInfoUnderConstruction uc = + (BlockInfoUnderConstruction)b; final int numNodes = b.numNodes(); final int min = getMinStorageNum(b); final BlockUCState state = b.getBlockUCState(); @@ -3716,7 +3723,11 @@ public class BlockManager implements BlockStatsMXBean { return blocksMap.getBlockCollection(b); } - public void removeBlockFromMap(BlockInfo block) { + public int numCorruptReplicas(Block block) { + return corruptReplicas.numCorruptReplicas(block); + } + + public void removeBlockFromMap(Block block) { removeFromExcessReplicateMap(block); blocksMap.removeBlock(block); // If block is removed from blocksMap remove it from corruptReplicasMap @@ -3726,7 +3737,7 @@ public class BlockManager implements BlockStatsMXBean { /** * If a block is removed from blocksMap, remove it from excessReplicateMap. */ - private void removeFromExcessReplicateMap(BlockInfo block) { + private void removeFromExcessReplicateMap(Block block) { for (DatanodeStorageInfo info : getStorages(block)) { String uuid = info.getDatanodeDescriptor().getDatanodeUuid(); LightWeightLinkedSet excessReplicas = @@ -3757,14 +3768,14 @@ public class BlockManager implements BlockStatsMXBean { /** * Get the replicas which are corrupt for a given block. */ - public Collection getCorruptReplicas(BlockInfo block) { + public Collection getCorruptReplicas(Block block) { return corruptReplicas.getNodes(block); } /** * Get reason for certain corrupted replicas for a given block and a given dn. 
    */
-  public String getCorruptReason(BlockInfo block, DatanodeDescriptor node) {
+  public String getCorruptReason(Block block, DatanodeDescriptor node) {
     return corruptReplicas.getCorruptReason(block, node);
   }
 
@@ -3858,7 +3869,7 @@ public class BlockManager implements BlockStatsMXBean {
     datanodeManager.clearPendingQueues();
     postponedMisreplicatedBlocks.clear();
     postponedMisreplicatedBlocksCount.set(0);
-  }
+  };
 
   public static LocatedBlock newLocatedBlock(
       ExtendedBlock b, DatanodeStorageInfo[] storages,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
index 85cea5ab99e..0dbf4858b7c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
@@ -117,7 +117,7 @@ class BlocksMap {
    * remove it from all data-node lists it belongs to;
    * and remove all data-node locations associated with the block.
    */
-  void removeBlock(BlockInfo block) {
+  void removeBlock(Block block) {
     BlockInfo blockInfo = blocks.remove(block);
     if (blockInfo == null)
       return;
@@ -190,7 +190,7 @@ class BlocksMap {
     // remove block from the data-node list and the node from the block info
     boolean removed = node.removeBlock(info);
 
-    if (info.hasEmptyStorage()          // no datanodes left
+    if (info.getDatanode(0) == null     // no datanodes left
         && info.isDeleted()) {          // does not belong to a file
       blocks.remove(b);                 // remove block from the map
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ContiguousBlockStorageOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ContiguousBlockStorageOp.java
index 70251e17cb6..092f65ec3cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ContiguousBlockStorageOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ContiguousBlockStorageOp.java
@@ -45,12 +45,13 @@ class ContiguousBlockStorageOp {
     return last;
   }
 
-  static void addStorage(BlockInfo b, DatanodeStorageInfo storage) {
+  static boolean addStorage(BlockInfo b, DatanodeStorageInfo storage) {
     // find the last null node
     int lastNode = ensureCapacity(b, 1);
     b.setStorageInfo(lastNode, storage);
     b.setNext(lastNode, null);
     b.setPrevious(lastNode, null);
+    return true;
   }
 
   static boolean removeStorage(BlockInfo b,
@@ -102,8 +103,4 @@ class ContiguousBlockStorageOp {
           "newBlock already exists.");
     }
   }
-
-  static boolean hasEmptyStorage(BlockInfo b) {
-    return b.getStorageInfo(0) == null;
-  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
index 9a0023de0aa..fc2e23408f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.Server;
 
@@ -46,12 +46,8 @@ public class CorruptReplicasMap{
     CORRUPTION_REPORTED  // client or datanode reported the corruption
   }
 
-  /**
-   * Used to track corrupted replicas (for contiguous block) or internal blocks
-   * (for striped block) and the corresponding DataNodes. For a striped block,
-   * the key here is the striped block group object stored in the blocksMap.
-   */
-  private final SortedMap<BlockInfo, Map<DatanodeDescriptor, Reason>> corruptReplicasMap = new TreeMap<>();
+  private final SortedMap<Block, Map<DatanodeDescriptor, Reason>> corruptReplicasMap =
+      new TreeMap<Block, Map<DatanodeDescriptor, Reason>>();
 
   /**
    * Mark the block belonging to datanode as corrupt.
@@ -61,21 +57,21 @@ public class CorruptReplicasMap{
    * @param reason a textual reason (for logging purposes)
    * @param reasonCode the enum representation of the reason
    */
-  void addToCorruptReplicasMap(BlockInfo blk, DatanodeDescriptor dn,
+  void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn,
       String reason, Reason reasonCode) {
     Map<DatanodeDescriptor, Reason> nodes = corruptReplicasMap.get(blk);
     if (nodes == null) {
-      nodes = new HashMap<>();
+      nodes = new HashMap<DatanodeDescriptor, Reason>();
       corruptReplicasMap.put(blk, nodes);
     }
-
+
     String reasonText;
     if (reason != null) {
       reasonText = " because " + reason;
     } else {
       reasonText = "";
     }
-
+
     if (!nodes.keySet().contains(dn)) {
       NameNode.blockStateChangeLog.info(
           "BLOCK NameSystem.addToCorruptReplicasMap: {} added as corrupt on "
@@ -96,7 +92,7 @@ public class CorruptReplicasMap{
    *
    * @param blk Block to be removed
    */
-  void removeFromCorruptReplicasMap(BlockInfo blk) {
+  void removeFromCorruptReplicasMap(Block blk) {
     if (corruptReplicasMap != null) {
       corruptReplicasMap.remove(blk);
     }
@@ -109,13 +105,12 @@ public class CorruptReplicasMap{
    * @return true if the removal is successful;
              false if the replica is not in the map
    */
-  boolean removeFromCorruptReplicasMap(BlockInfo blk,
-      DatanodeDescriptor datanode) {
+  boolean removeFromCorruptReplicasMap(Block blk, DatanodeDescriptor datanode) {
     return removeFromCorruptReplicasMap(blk, datanode, Reason.ANY);
   }
 
-  boolean removeFromCorruptReplicasMap(BlockInfo blk,
-      DatanodeDescriptor datanode, Reason reason) {
+  boolean removeFromCorruptReplicasMap(Block blk, DatanodeDescriptor datanode,
+      Reason reason) {
     Map<DatanodeDescriptor, Reason> datanodes = corruptReplicasMap.get(blk);
     if (datanodes==null)
       return false;
@@ -144,9 +139,11 @@ public class CorruptReplicasMap{
    * @param blk Block for which nodes are requested
    * @return collection of nodes. Null if does not exists
    */
-  Collection<DatanodeDescriptor> getNodes(BlockInfo blk) {
-    Map<DatanodeDescriptor, Reason> nodes = corruptReplicasMap.get(blk);
-    return nodes != null ? nodes.keySet() : null;
+  Collection<DatanodeDescriptor> getNodes(Block blk) {
+    Map<DatanodeDescriptor, Reason> nodes = corruptReplicasMap.get(blk);
+    if (nodes == null)
+      return null;
+    return nodes.keySet();
   }
 
   /**
@@ -156,12 +153,12 @@ public class CorruptReplicasMap{
    * @param node DatanodeDescriptor which holds the replica
    * @return true if replica is corrupt, false if does not exists in this map
    */
-  boolean isReplicaCorrupt(BlockInfo blk, DatanodeDescriptor node) {
+  boolean isReplicaCorrupt(Block blk, DatanodeDescriptor node) {
     Collection<DatanodeDescriptor> nodes = getNodes(blk);
     return ((nodes != null) && (nodes.contains(node)));
   }
 
-  int numCorruptReplicas(BlockInfo blk) {
+  int numCorruptReplicas(Block blk) {
     Collection<DatanodeDescriptor> nodes = getNodes(blk);
     return (nodes == null) ? 0 : nodes.size();
   }
@@ -171,9 +168,9 @@ public class CorruptReplicasMap{
   }
 
   /**
-   * Return a range of corrupt replica block ids. Up to numExpectedBlocks
+   * Return a range of corrupt replica block ids.  Up to numExpectedBlocks
    * blocks starting at the next block after startingBlockId are returned
-   * (fewer if numExpectedBlocks blocks are unavailable). If startingBlockId
+   * (fewer if numExpectedBlocks blocks are unavailable).  If startingBlockId
    * is null, up to numExpectedBlocks blocks are returned from the beginning.
    * If startingBlockId cannot be found, null is returned.
    *
@@ -184,39 +181,44 @@
    * @return Up to numExpectedBlocks blocks from startingBlockId if it exists
    *
    */
-  @VisibleForTesting
   long[] getCorruptReplicaBlockIds(int numExpectedBlocks,
                                    Long startingBlockId) {
     if (numExpectedBlocks < 0 || numExpectedBlocks > 100) {
       return null;
     }
-    Iterator<BlockInfo> blockIt = corruptReplicasMap.keySet().iterator();
+
+    Iterator<Block> blockIt = corruptReplicasMap.keySet().iterator();
+
     // if the starting block id was specified, iterate over keys until
     // we find the matching block. If we find a matching block, break
-    // to leave the iterator on the next block after the specified block.
+    // to leave the iterator on the next block after the specified block.
     if (startingBlockId != null) {
       boolean isBlockFound = false;
       while (blockIt.hasNext()) {
-        BlockInfo b = blockIt.next();
+        Block b = blockIt.next();
         if (b.getBlockId() == startingBlockId) {
           isBlockFound = true;
-          break;
+          break;
         }
       }
+
       if (!isBlockFound) {
         return null;
       }
     }
-    ArrayList<Long> corruptReplicaBlockIds = new ArrayList<>();
+    ArrayList<Long> corruptReplicaBlockIds = new ArrayList<Long>();
+
     // append up to numExpectedBlocks blockIds to our list
     for(int i=0; i<numExpectedBlocks && blockIt.hasNext(); i++) {
       corruptReplicaBlockIds.add(blockIt.next().getBlockId());
     }
 
     long[] ret = new long[corruptReplicaBlockIds.size()];
     for(int i=0; i<ret.length; i++) {
       ret[i] = corruptReplicaBlockIds.get(i);
     }
 
     return ret;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -104,7 +104,7 @@ class FSDirWriteFileOp {
   static boolean unprotectedRemoveBlock(
       FSDirectory fsd, String path, INodesInPath iip, INodeFile fileNode,
-      BlockInfo block) throws IOException {
+      Block block) throws IOException {
     // modify file-> block and blocksMap
     // fileNode should be under construction
     BlockInfoUnderConstruction uc = fileNode.removeLastBlock(block);
@@ -136,9 +136,7 @@ class FSDirWriteFileOp {
     fsd.writeLock();
     try {
       // Remove the block from the pending creates list
-      BlockInfo storedBlock = fsd.getBlockManager().getStoredBlock(localBlock);
-      if (storedBlock != null &&
-          !unprotectedRemoveBlock(fsd, src, iip, file, storedBlock)) {
+      if (!unprotectedRemoveBlock(fsd, src, iip, file, localBlock)) {
         return;
       }
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 96d69829383..63ef9858bd4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -1035,7 +1035,7 @@ public class FSEditLogLoader {
       throw new IOException("Trying to remove more than one block from file "
           + path);
     }
-    BlockInfo oldBlock = oldBlocks[oldBlocks.length - 1];
+    Block oldBlock = oldBlocks[oldBlocks.length - 1];
     boolean removed = FSDirWriteFileOp.unprotectedRemoveBlock(
         fsDir, path, iip, file, oldBlock);
     if (!removed && !(op instanceof UpdateBlocksOp)) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 2a8231ad101..ab179b454c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -267,8 +267,10 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
       out.println("No. of corrupted Replica: " + numberReplicas.corruptReplicas());
 
       //record datanodes that have corrupted block replica
-      Collection<DatanodeDescriptor> corruptionRecord =
-          bm.getCorruptReplicas(blockInfo);
+      Collection<DatanodeDescriptor> corruptionRecord = null;
+      if (bm.getCorruptReplicas(block) != null) {
+        corruptionRecord = bm.getCorruptReplicas(block);
+      }
 
       //report block replicas status on datanodes
       for(int idx = (blockInfo.numNodes()-1); idx >= 0; idx--) {
@@ -277,7 +279,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
             dn.getNetworkLocation() + " ");
         if (corruptionRecord != null && corruptionRecord.contains(dn)) {
           out.print(CORRUPT_STATUS+"\t ReasonCode: "+
-              bm.getCorruptReason(blockInfo, dn));
+              bm.getCorruptReason(block,dn));
         } else if (dn.isDecommissioned() ){
           out.print(DECOMMISSIONED_STATUS);
         } else if (dn.isDecommissionInProgress()) {
@@ -648,7 +650,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
               LightWeightLinkedSet<Block> blocksExcess =
                   bm.excessReplicateMap.get(dnDesc.getDatanodeUuid());
               Collection<DatanodeDescriptor> corruptReplicas =
-                  bm.getCorruptReplicas(storedBlock);
+                  bm.getCorruptReplicas(block.getLocalBlock());
               sb.append("(");
               if (dnDesc.isDecommissioned()) {
                 sb.append("DECOMMISSIONED)");
@@ -656,7 +658,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
                 sb.append("DECOMMISSIONING)");
               } else if (corruptReplicas != null && corruptReplicas.contains(dnDesc)) {
                 sb.append("CORRUPT)");
-              } else if (blocksExcess != null && blocksExcess.contains(storedBlock)) {
+              } else if (blocksExcess != null && blocksExcess.contains(block.getLocalBlock())) {
                 sb.append("EXCESS)");
               } else if (dnDesc.isStale(this.staleInterval)) {
                 sb.append("STALE_NODE)");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index af1e023457a..89ee674ff45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -560,8 +560,7 @@ public class DFSTestUtil {
       throws TimeoutException, InterruptedException {
     int count = 0;
     final int ATTEMPTS = 50;
-    int repls = BlockManagerTestUtil.numCorruptReplicas(ns.getBlockManager(),
-        b.getLocalBlock());
+    int repls = ns.getBlockManager().numCorruptReplicas(b.getLocalBlock());
     while (repls != corruptRepls && count < ATTEMPTS) {
       try {
         IOUtils.copyBytes(fs.open(file), new IOUtils.NullOutputStream(),
@@ -573,8 +572,7 @@ public class DFSTestUtil {
       count++;
       // check more often so corrupt block reports are not easily missed
       for (int i = 0; i < 10; i++) {
-        repls = BlockManagerTestUtil.numCorruptReplicas(ns.getBlockManager(),
-            b.getLocalBlock());
+        repls = ns.getBlockManager().numCorruptReplicas(b.getLocalBlock());
         Thread.sleep(100);
         if (repls == corruptRepls) {
           break;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
index a899891baaa..148135bae97 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
@@ -24,7 +24,6 @@ import java.util.HashSet;
 import java.util.Set;
 import java.util.concurrent.ExecutionException;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -88,7 +87,7 @@ public class BlockManagerTestUtil {
       final Block b) {
     final Set<String> rackSet = new HashSet<String>(0);
     final Collection<DatanodeDescriptor> corruptNodes =
-        getCorruptReplicas(blockManager).getNodes(blockManager.getStoredBlock(b));
+       getCorruptReplicas(blockManager).getNodes(b);
     for(DatanodeStorageInfo storage : blockManager.blocksMap.getStorages(b)) {
       final DatanodeDescriptor cur = storage.getDatanodeDescriptor();
       if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) {
@@ -307,8 +306,4 @@ public class BlockManagerTestUtil {
       throws ExecutionException, InterruptedException {
     dm.getDecomManager().runMonitor();
   }
-
-  public static int numCorruptReplicas(BlockManager bm, Block block) {
-    return bm.corruptReplicas.numCorruptReplicas(bm.getStoredBlock(block));
-  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
index c23f3d00925..bae4f1d41bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
@@ -63,7 +63,9 @@ public class TestBlockInfo {
     final DatanodeStorageInfo storage =
         DFSTestUtil.createDatanodeStorageInfo("storageID", "127.0.0.1");
 
-    blockInfo.addStorage(storage, blockInfo);
+    boolean added = blockInfo.addStorage(storage, blockInfo);
+
+    Assert.assertTrue(added);
     Assert.assertEquals(storage, blockInfo.getStorageInfo(0));
   }
 
@@ -71,7 +73,7 @@ public class TestBlockInfo {
   public void testCopyConstructor() {
     BlockInfo old = new BlockInfoContiguous((short) 3);
     try {
-      BlockInfo copy = new BlockInfoContiguous(old);
+      BlockInfo copy = new BlockInfoContiguous((BlockInfoContiguous)old);
       assertEquals(old.getBlockCollection(), copy.getBlockCollection());
       assertEquals(old.getCapacity(), copy.getCapacity());
     } catch (Exception e) {
@@ -108,8 +110,8 @@ public class TestBlockInfo {
     final int MAX_BLOCKS = 10;
 
     DatanodeStorageInfo dd = DFSTestUtil.createDatanodeStorageInfo("s1", "1.1.1.1");
-    ArrayList<Block> blockList = new ArrayList<>(MAX_BLOCKS);
-    ArrayList<BlockInfo> blockInfoList = new ArrayList<>();
+    ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
+    ArrayList<BlockInfo> blockInfoList = new ArrayList<BlockInfo>();
     int headIndex;
     int curIndex;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index f6cc747208a..9e316708541 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -509,7 +509,7 @@ public class TestBlockManager {
             + " even if all available source nodes have reached their replication"
             + " limits below the hard limit.",
         bm.chooseSourceDatanode(
-            bm.getStoredBlock(aBlock),
+            aBlock,
             cntNodes,
             liveNodes,
             new NumberReplicas(),
@@ -519,7 +519,7 @@ public class TestBlockManager {
             + " replication since all available source nodes have reached"
             + " their replication limits.",
         bm.chooseSourceDatanode(
-            bm.getStoredBlock(aBlock),
+            aBlock,
             cntNodes,
             liveNodes,
             new NumberReplicas(),
@@ -532,7 +532,7 @@ public class TestBlockManager {
     assertNull("Does not choose a source node for a highest-priority"
             + " replication when all available nodes exceed the hard limit.",
         bm.chooseSourceDatanode(
-            bm.getStoredBlock(aBlock),
+            aBlock,
             cntNodes,
             liveNodes,
             new NumberReplicas(),
@@ -558,7 +558,7 @@ public class TestBlockManager {
             + " if all available source nodes have reached their replication"
             + " limits below the hard limit.",
         bm.chooseSourceDatanode(
-            bm.getStoredBlock(aBlock),
+            aBlock,
             cntNodes,
             liveNodes,
             new NumberReplicas(),
@@ -572,7 +572,7 @@ public class TestBlockManager {
     assertNull("Does not choose a source decommissioning node for a normal"
             + " replication when all available nodes exceed the hard limit.",
         bm.chooseSourceDatanode(
-            bm.getStoredBlock(aBlock),
+            aBlock,
             cntNodes,
             liveNodes,
             new NumberReplicas(),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
index 1a49beebd89..21fb54e289c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
@@ -48,19 +48,20 @@ public class TestCorruptReplicaInfo {
   private static final Log LOG =
       LogFactory.getLog(TestCorruptReplicaInfo.class);
 
-  private final Map<Long, BlockInfo> block_map = new HashMap<>();
+  private final Map<Long, Block> block_map =
+      new HashMap<Long, Block>();
 
   // Allow easy block creation by block id
   // Return existing block if one with same block id already exists
-  private BlockInfo getBlock(Long block_id) {
+  private Block getBlock(Long block_id) {
     if (!block_map.containsKey(block_id)) {
-      block_map.put(block_id,
-          new BlockInfoContiguous(new Block(block_id, 0, 0), (short) 1));
+      block_map.put(block_id, new Block(block_id,0,0));
     }
+
     return block_map.get(block_id);
   }
 
-  private BlockInfo getBlock(int block_id) {
+  private Block getBlock(int block_id) {
     return getBlock((long)block_id);
   }
 
@@ -81,7 +82,7 @@ public class TestCorruptReplicaInfo {
     // create a list of block_ids. A list is used to allow easy validation of the
    // output of getCorruptReplicaBlockIds
    int NUM_BLOCK_IDS = 140;
-    List<Long> block_ids = new LinkedList<>();
+    List<Long> block_ids = new LinkedList<Long>();
    for (int i=0;i<NUM_BLOCK_IDS;i++) {