Revert "HDFS-8623. Refactor NameNode handling of invalid, corrupt, and under-recovery blocks. Contributed by Zhe Zhang."
This reverts commit de480d6c89
.
This commit is contained in:
parent
fb1be0b310
commit
663eba0ab1
|
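In short, this revert drops the reportedBlock parameter that HDFS-8623 threaded through the NameNode's block-storage APIs in preparation for erasure coding. Excerpted signatures from the hunks below, bodies elided:

    // Removed by this revert (added in HDFS-8623): callers pass the block as
    // reported by the datanode alongside the stored BlockInfo.
    abstract boolean addStorage(DatanodeStorageInfo storage, Block reportedBlock);
    public AddBlockResult addBlock(BlockInfo b, Block reportedBlock) { ... }

    // Restored by this revert: a block is identified only by itself.
    abstract boolean addStorage(DatanodeStorageInfo storage);
    public AddBlockResult addBlock(BlockInfo b) { ... }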
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -689,9 +689,6 @@ Release 2.8.0 - UNRELEASED
     HDFS-8651. Make hadoop-hdfs-project Native code -Wall-clean (Alan Burlison
     via Colin P. McCabe)
 
-    HDFS-8623. Refactor NameNode handling of invalid, corrupt, and under-recovery
-    blocks. (Zhe Zhang via jing9)
-
     HDFS-8653. Code cleanup for DatanodeManager, DatanodeDescriptor and
     DatanodeStorageInfo. (Zhe Zhang via wang)
 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -172,23 +172,19 @@ public abstract class BlockInfo extends Block
   public abstract int numNodes();
 
   /**
-   * Add a {@link DatanodeStorageInfo} location for a block
-   * @param storage The storage to add
-   * @param reportedBlock The block reported from the datanode. This is only
-   *                      used by erasure coded blocks, this block's id contains
-   *                      information indicating the index of the block in the
-   *                      corresponding block group.
+   * Add a {@link DatanodeStorageInfo} location for a block.
    */
-  abstract boolean addStorage(DatanodeStorageInfo storage, Block reportedBlock);
+  abstract boolean addStorage(DatanodeStorageInfo storage);
 
   /**
    * Remove {@link DatanodeStorageInfo} location for a block
    */
   abstract boolean removeStorage(DatanodeStorageInfo storage);
 
+
   /**
    * Replace the current BlockInfo with the new one in corresponding
-   * DatanodeStorageInfo's linked list.
+   * DatanodeStorageInfo's linked list
    */
   abstract void replaceBlock(BlockInfo newBlock);
 
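The deleted javadoc above explains why the extra parameter existed: for erasure-coded blocks, the reported block's id encodes the block's position inside its block group. As a purely illustrative sketch (the mask below is an assumption for illustration, not taken from this diff), recovering such an index could look like:

    // Hypothetical sketch: assumes an index packed into the low bits of a
    // striped block id, per the removed javadoc; the mask width is invented
    // here for illustration, not taken from this commit.
    class BlockGroupIndex {
      static final long INDEX_MASK = 0xF;

      static int indexInGroup(long reportedBlockId) {
        return (int) (reportedBlockId & INDEX_MASK);
      }
    }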
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -45,7 +45,7 @@ public class BlockInfoContiguous extends BlockInfo {
   }
 
   @Override
-  boolean addStorage(DatanodeStorageInfo storage, Block reportedBlock) {
+  boolean addStorage(DatanodeStorageInfo storage) {
     return ContiguousBlockStorageOp.addStorage(this, storage);
   }
 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java
@@ -69,7 +69,7 @@ public class BlockInfoUnderConstructionContiguous extends
   }
 
   @Override
-  boolean addStorage(DatanodeStorageInfo storage, Block reportedBlock) {
+  boolean addStorage(DatanodeStorageInfo storage) {
     return ContiguousBlockStorageOp.addStorage(this, storage);
   }
 
(Diff for one file suppressed because it is too large.)
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -24,7 +24,6 @@ import java.util.List;
 import com.google.common.annotations.VisibleForTesting;
 
 import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
@@ -234,7 +233,7 @@ public class DatanodeStorageInfo {
     return blockPoolUsed;
   }
 
-  public AddBlockResult addBlock(BlockInfo b, Block reportedBlock) {
+  public AddBlockResult addBlock(BlockInfo b) {
     // First check whether the block belongs to a different storage
     // on the same DN.
     AddBlockResult result = AddBlockResult.ADDED;
@@ -253,18 +252,10 @@ public class DatanodeStorageInfo {
     }
 
     // add to the head of the data-node list
-    b.addStorage(this, reportedBlock);
-    insertToList(b);
-    return result;
-  }
-
-  AddBlockResult addBlock(BlockInfo b) {
-    return addBlock(b, b);
-  }
-
-  public void insertToList(BlockInfo b) {
+    b.addStorage(this);
     blockList = b.listInsert(blockList, this);
     numBlocks++;
+    return result;
   }
 
   public boolean removeBlock(BlockInfo b) {
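Both before and after the revert, a storage keeps its blocks in an intrusive linked list and inserts at the head (the "add to the head of the data-node list" comment); the revert only folds the insertToList helper back into addBlock. A toy sketch of that head insertion with stand-in types, not the HDFS triplets structure that listInsert actually threads through BlockInfo:

    // Stand-in types to show the O(1) head insertion; newest blocks are
    // reached first when the storage's list is walked.
    class ToyBlock {
      final long id;
      ToyBlock next;
      ToyBlock(long id) { this.id = id; }
    }

    class ToyStorage {
      ToyBlock blockList;   // head of this storage's block list
      int numBlocks;

      void insertToList(ToyBlock b) {
        b.next = blockList; // new block becomes the head
        blockList = b;
        numBlocks++;
      }
    }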
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -142,6 +142,7 @@ import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
 import org.apache.hadoop.fs.CacheFlag;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
@@ -2790,7 +2791,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       if (trackBlockCounts) {
         if (b.isComplete()) {
           numRemovedComplete++;
-          if (blockManager.hasMinStorage(b)) {
+          if (blockManager.checkMinReplication(b)) {
             numRemovedSafe++;
           }
         }
@@ -3022,7 +3023,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       curBlock = blocks[nrCompleteBlocks];
       if(!curBlock.isComplete())
         break;
-      assert blockManager.hasMinStorage(curBlock) :
+      assert blockManager.checkMinReplication(curBlock) :
               "A COMPLETE block is not minimally replicated in " + src;
     }
 
@@ -3058,7 +3059,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
     // If penultimate block doesn't exist then its minReplication is met
     boolean penultimateBlockMinReplication = penultimateBlock == null ? true :
-        blockManager.hasMinStorage(penultimateBlock);
+        blockManager.checkMinReplication(penultimateBlock);
 
     switch(lastBlockState) {
     case COMPLETE:
@@ -3067,7 +3068,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     case COMMITTED:
       // Close file if committed blocks are minimally replicated
       if(penultimateBlockMinReplication &&
-          blockManager.hasMinStorage(lastBlock)) {
+          blockManager.checkMinReplication(lastBlock)) {
         finalizeINodeFileUnderConstruction(src, pendingFile,
             iip.getLatestSnapshotId());
         NameNode.stateChangeLog.warn("BLOCK*"
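The four FSNamesystem hunks above are one rename: the revert restores checkMinReplication where HDFS-8623 had introduced hasMinStorage. Either way the question is whether a block has reached the configured minimum number of live replicas; a hedged, self-contained sketch (only the method name comes from this diff, the comparison is an assumption):

    // Sketch of the restored check: a block is "minimally replicated" once
    // its live replica count reaches dfs.namenode.replication.min.
    static boolean checkMinReplication(int liveReplicas, int minReplication) {
      return liveReplicas >= minReplication;
    }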
@@ -3359,9 +3360,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
             trimmedTargets.get(i).getStorageInfo(trimmedStorages.get(i));
         if (storageInfo != null) {
           if(copyTruncate) {
-            storageInfo.addBlock(truncatedBlock, truncatedBlock);
+            storageInfo.addBlock(truncatedBlock);
           } else {
-            storageInfo.addBlock(storedBlock, storedBlock);
+            storageInfo.addBlock(storedBlock);
           }
         }
       }
@@ -3377,9 +3378,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       } else {
         iFile.setLastBlock(storedBlock, trimmedStorageInfos);
         if (closeFile) {
-          blockManager.markBlockReplicasAsCorrupt(oldBlock.getLocalBlock(),
-              storedBlock, oldGenerationStamp, oldNumBytes,
-              trimmedStorageInfos);
+          blockManager.markBlockReplicasAsCorrupt(storedBlock,
+              oldGenerationStamp, oldNumBytes, trimmedStorageInfos);
         }
       }
     }
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -647,7 +647,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
               .getStorageType()));
         }
         if (showReplicaDetails) {
-          LightWeightLinkedSet<BlockInfo> blocksExcess =
+          LightWeightLinkedSet<Block> blocksExcess =
               bm.excessReplicateMap.get(dnDesc.getDatanodeUuid());
           Collection<DatanodeDescriptor> corruptReplicas =
               bm.getCorruptReplicas(block.getLocalBlock());
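The fsck and test hunks track a type change in BlockManager.excessReplicateMap: after the revert its values hold plain Block entries again instead of BlockInfo. A sketch of the shape implied by the lookups (the declaration is reconstructed from usage with java.util stand-ins for LightWeightLinkedSet, and is not itself part of this diff):

    import java.util.Map;
    import java.util.Set;
    import java.util.TreeMap;

    class ExcessReplicaMapSketch {
      // Minimal stub standing in for org.apache.hadoop.hdfs.protocol.Block.
      static class Block {
        final long blockId;
        Block(long blockId) { this.blockId = blockId; }
      }

      // Datanode UUID -> blocks with excess replicas scheduled for deletion
      // on that datanode; Set stands in for LightWeightLinkedSet<Block>.
      final Map<String, Set<Block>> excessReplicateMap = new TreeMap<>();
    }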
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
@@ -63,7 +63,7 @@ public class TestBlockInfo {
 
     final DatanodeStorageInfo storage = DFSTestUtil.createDatanodeStorageInfo("storageID", "127.0.0.1");
 
-    boolean added = blockInfo.addStorage(storage, blockInfo);
+    boolean added = blockInfo.addStorage(storage);
 
     Assert.assertTrue(added);
     Assert.assertEquals(storage, blockInfo.getStorageInfo(0));
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -383,7 +383,7 @@ public class TestBlockManager {
     for (int i = 1; i < pipeline.length; i++) {
       DatanodeStorageInfo storage = pipeline[i];
       bm.addBlock(storage, blockInfo, null);
-      blockInfo.addStorage(storage, blockInfo);
+      blockInfo.addStorage(storage);
     }
   }
 
@@ -393,7 +393,7 @@ public class TestBlockManager {
 
     for (DatanodeDescriptor dn : nodes) {
       for (DatanodeStorageInfo storage : dn.getStorageInfos()) {
-        blockInfo.addStorage(storage, blockInfo);
+        blockInfo.addStorage(storage);
       }
     }
     return blockInfo;
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java
@@ -100,7 +100,7 @@ public class TestNodeCount {
       DatanodeDescriptor nonExcessDN = null;
       for(DatanodeStorageInfo storage : bm.blocksMap.getStorages(block.getLocalBlock())) {
         final DatanodeDescriptor dn = storage.getDatanodeDescriptor();
-        Collection<BlockInfo> blocks = bm.excessReplicateMap.get(dn.getDatanodeUuid());
+        Collection<Block> blocks = bm.excessReplicateMap.get(dn.getDatanodeUuid());
         if (blocks == null || !blocks.contains(block.getLocalBlock()) ) {
           nonExcessDN = dn;
           break;
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -41,6 +42,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.util.Time;
 import org.junit.Test;
 
 public class TestOverReplicatedBlocks {
@@ -183,7 +185,7 @@ public class TestOverReplicatedBlocks {
       // All replicas for deletion should be scheduled on lastDN.
       // And should not actually be deleted, because lastDN does not heartbeat.
       namesystem.readLock();
-      Collection<BlockInfo> dnBlocks =
+      Collection<Block> dnBlocks =
         namesystem.getBlockManager().excessReplicateMap.get(lastDNid);
       assertEquals("Replicas on node " + lastDNid + " should have been deleted",
           SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE, dnBlocks.size());
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -1250,7 +1250,7 @@ public class TestReplicationPolicy {
     when(storage.removeBlock(any(BlockInfo.class))).thenReturn(true);
     when(storage.addBlock(any(BlockInfo.class))).thenReturn
         (DatanodeStorageInfo.AddBlockResult.ADDED);
-    ucBlock.addStorage(storage, ucBlock);
+    ucBlock.addStorage(storage);
 
     when(mbc.setLastBlock((BlockInfo) any(), (DatanodeStorageInfo[]) any()))
         .thenReturn(ucBlock);