HDFS-7936. Erasure coding: resolving conflicts when merging with HDFS-7903 and HDFS-7435. Contributed by Zhe Zhang.

Authored by Jing Zhao on 2015-03-16 14:27:21 -07:00; committed by Zhe Zhang
parent 68caf8728e
commit 11585883a9
5 changed files with 8 additions and 8 deletions

DecommissionManager.java

@@ -535,7 +535,7 @@ public class DecommissionManager {
      */
     private void processBlocksForDecomInternal(
         final DatanodeDescriptor datanode,
-        final Iterator<BlockInfoContiguous> it,
+        final Iterator<? extends BlockInfo> it,
         final List<BlockInfoContiguous> insufficientlyReplicated,
         boolean pruneSufficientlyReplicated) {
       boolean firstReplicationLog = true;
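
The wildcard introduced in this hunk is what lets the decommission scan iterate striped (erasure-coded) blocks as well as contiguous ones: Java generics are invariant, so an Iterator<BlockInfoContiguous> is not assignable to an Iterator<BlockInfo>, even though BlockInfoContiguous extends BlockInfo. A minimal sketch with stand-in types (not HDFS code) of why the bounded wildcard is needed:

    import java.util.Iterator;
    import java.util.List;

    abstract class Block {}                 // stand-in for BlockInfo
    class ContiguousBlock extends Block {}  // stand-in for BlockInfoContiguous
    class StripedBlock extends Block {}     // stand-in for a striped (EC) block type

    public class WildcardDemo {
      // Accepts an iterator over any Block subtype, contiguous or striped.
      static int count(Iterator<? extends Block> it) {
        int n = 0;
        while (it.hasNext()) {
          it.next();
          n++;
        }
        return n;
      }

      public static void main(String[] args) {
        List<ContiguousBlock> contiguous = List.of(new ContiguousBlock());
        List<StripedBlock> striped = List.of(new StripedBlock(), new StripedBlock());
        // Both calls compile only because the parameter is
        // Iterator<? extends Block>; a plain Iterator<Block> would reject both.
        System.out.println(count(contiguous.iterator())); // 1
        System.out.println(count(striped.iterator()));    // 2
      }
    }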

FSNamesystem.java

@@ -2030,7 +2030,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
     // Check if the file is already being truncated with the same length
-    final BlockInfoContiguous last = file.getLastBlock();
+    final BlockInfo last = file.getLastBlock();
     if (last != null && last.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
       final Block truncateBlock
           = ((BlockInfoContiguousUnderConstruction)last).getTruncateBlock();
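
This hunk widens only the declared type of last to the base class; the downcast to BlockInfoContiguousUnderConstruction is kept, so truncate recovery still assumes the last block is contiguous. A minimal sketch with stand-in types (not HDFS code) of this widen-the-declaration, keep-the-cast pattern:

    abstract class Block {
      abstract boolean underRecovery();  // stand-in for the getBlockUCState() check
    }

    class ContiguousUnderConstruction extends Block {
      boolean underRecovery() { return true; }
      // Accessor that exists only on this subtype, like getTruncateBlock().
      String truncateBlock() { return "truncateBlock"; }
    }

    public class WidenDemo {
      public static void main(String[] args) {
        // Declared as the base type, like `final BlockInfo last` above.
        Block last = new ContiguousUnderConstruction();
        if (last != null && last.underRecovery()) {
          // The cast survives the widening; it stays safe only while this
          // code path never sees a non-contiguous block.
          System.out.println(((ContiguousUnderConstruction) last).truncateBlock());
        }
      }
    }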

FileDiffList.java

@@ -21,6 +21,7 @@ import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -131,7 +132,7 @@ public class FileDiffList extends
         break;
       }
       // Check if last block is part of truncate recovery
-      BlockInfoContiguous lastBlock = file.getLastBlock();
+      BlockInfo lastBlock = file.getLastBlock();
       Block dontRemoveBlock = null;
       if (lastBlock != null && lastBlock.getBlockUCState().equals(
           HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) {

TestDecommission.java

@@ -48,7 +48,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -671,8 +671,7 @@ public class TestDecommission {
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
       @Override
       public Boolean get() {
-        BlockInfoContiguous info =
-            blockManager.getStoredBlock(b.getLocalBlock());
+        BlockInfo info = blockManager.getStoredBlock(b.getLocalBlock());
         int count = 0;
         StringBuilder sb = new StringBuilder("Replica locations: ");
         for (int i = 0; i < info.numNodes(); i++) {

TestAddStripedBlocks.java

@@ -265,9 +265,9 @@ public class TestAddStripedBlocks {
     ReplicaBeingWritten replica = new ReplicaBeingWritten(block, null, null,
         null);
     blocks.add(replica);
-    BlockListAsLongs bll = new BlockListAsLongs(null, blocks);
+    BlockListAsLongs bll = BlockListAsLongs.encode(blocks);
     StorageBlockReport[] reports = {new StorageBlockReport(storage,
-        bll.getBlockListAsLongs())};
+        bll)};
     cluster.getNameNodeRpc().blockReport(dn.getDNRegistrationForBP(bpId),
         bpId, reports);
   }
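
This last hunk tracks the HDFS-7435 block-report change rather than the BlockInfo generalization: the removed BlockListAsLongs(null, blocks) constructor is replaced by the static factory BlockListAsLongs.encode(...), and StorageBlockReport now carries the encoded report object instead of the raw long[] from the old getBlockListAsLongs(). A hedged sketch of the resulting pattern, using only the calls visible in the hunk (buildReports is a hypothetical helper, not an HDFS method, and the import paths are assumed from contemporary Hadoop sources):

    import java.util.List;

    import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
    import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
    import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;

    public class BlockReportSketch {
      // Hypothetical helper illustrating the post-HDFS-7435 report shape.
      static StorageBlockReport[] buildReports(DatanodeStorage storage,
          List<ReplicaBeingWritten> blocks) {
        // encode(...) replaces the removed BlockListAsLongs(null, blocks)
        // constructor.
        BlockListAsLongs bll = BlockListAsLongs.encode(blocks);
        // The encoded report is passed directly; the old code unpacked it
        // into a long[] with bll.getBlockListAsLongs() first.
        return new StorageBlockReport[] {new StorageBlockReport(storage, bll)};
      }
    }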