HDFS-11445. FSCK shows overall health status as corrupt even if only one replica is corrupt. Contributed by Brahma Reddy Battula.

Brahma Reddy Battula 2017-05-25 22:35:10 +08:00
parent 8bf0e2d6b3
commit 2e41f8803d
4 changed files with 63 additions and 16 deletions

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java

@@ -27,7 +27,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.LightWeightGSet;
 import static org.apache.hadoop.hdfs.server.namenode.INodeId.INVALID_INODE_ID;
@@ -286,28 +285,25 @@ public abstract class BlockInfo extends Block
    * Process the recorded replicas. When about to commit or finish the
    * pipeline recovery sort out bad replicas.
    * @param genStamp The final generation stamp for the block.
+   * @return staleReplica's List.
    */
-  public void setGenerationStampAndVerifyReplicas(long genStamp) {
+  public List<ReplicaUnderConstruction> setGenerationStampAndVerifyReplicas(
+      long genStamp) {
     Preconditions.checkState(uc != null && !isComplete());
     // Set the generation stamp for the block.
     setGenerationStamp(genStamp);
-    // Remove the replicas with wrong gen stamp
-    List<ReplicaUnderConstruction> staleReplicas = uc.getStaleReplicas(genStamp);
-    for (ReplicaUnderConstruction r : staleReplicas) {
-      r.getExpectedStorageLocation().removeBlock(this);
-      NameNode.blockStateChangeLog.debug("BLOCK* Removing stale replica {}"
-          + " of {}", r, Block.toString(r));
-    }
+    return uc.getStaleReplicas(genStamp);
   }
 
   /**
    * Commit block's length and generation stamp as reported by the client.
    * Set block state to {@link BlockUCState#COMMITTED}.
    * @param block - contains client reported block length and generation
+   * @return staleReplica's List.
    * @throws IOException if block ids are inconsistent.
    */
-  void commitBlock(Block block) throws IOException {
+  List<ReplicaUnderConstruction> commitBlock(Block block) throws IOException {
     if (getBlockId() != block.getBlockId()) {
       throw new IOException("Trying to commit inconsistent block: id = "
           + block.getBlockId() + ", expected id = " + getBlockId());
@@ -316,6 +312,6 @@ public abstract class BlockInfo extends Block
     uc.commit();
     this.setNumBytes(block.getNumBytes());
     // Sort out invalid replicas.
-    setGenerationStampAndVerifyReplicas(block.getGenerationStamp());
+    return setGenerationStampAndVerifyReplicas(block.getGenerationStamp());
   }
 }
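The shape of this change: BlockInfo#setGenerationStampAndVerifyReplicas and BlockInfo#commitBlock stop detaching stale replicas themselves and instead hand the stale-replica list back to the caller, so the removal can run through BlockManager's own bookkeeping. Below is a minimal, self-contained sketch of that pattern; all names (ToyBlock, ToyManager, collectStale) are hypothetical illustrations, not Hadoop classes.

import java.util.ArrayList;
import java.util.List;

// Toy model of the pattern adopted above (all names hypothetical): the
// block object only reports which replicas are stale; the manager that
// owns the replica accounting performs the actual removal.
class ToyBlock {
  static class Replica {
    final String storage;
    final long genStamp;
    Replica(String storage, long genStamp) {
      this.storage = storage;
      this.genStamp = genStamp;
    }
  }

  private final List<Replica> replicas = new ArrayList<>();

  void addReplica(String storage, long genStamp) {
    replicas.add(new Replica(storage, genStamp));
  }

  // Analogue of setGenerationStampAndVerifyReplicas after this patch:
  // no side effects, just report the replicas with a mismatched stamp.
  List<Replica> collectStale(long finalGenStamp) {
    List<Replica> stale = new ArrayList<>();
    for (Replica r : replicas) {
      if (r.genStamp != finalGenStamp) {
        stale.add(r);
      }
    }
    return stale;
  }
}

public class ToyManager {
  // Analogue of BlockManager#removeStaleReplicas: removal runs where the
  // bookkeeping lives, so the block's replica counters stay consistent.
  static void removeStaleReplicas(List<ToyBlock.Replica> stale) {
    for (ToyBlock.Replica r : stale) {
      System.out.println("Removing stale replica on " + r.storage);
    }
  }

  public static void main(String[] args) {
    ToyBlock block = new ToyBlock();
    block.addReplica("dn1", 1001);
    block.addReplica("dn2", 1002);
    removeStaleReplicas(block.collectStale(1002)); // dn1's replica is stale
  }
}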

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -848,7 +848,7 @@ public class BlockManager implements BlockStatsMXBean {
    * @throws IOException if the block does not have at least a minimal number
    * of replicas reported from data-nodes.
    */
-  private static boolean commitBlock(final BlockInfo block,
+  private boolean commitBlock(final BlockInfo block,
       final Block commitBlock) throws IOException {
     if (block.getBlockUCState() == BlockUCState.COMMITTED)
       return false;
@@ -859,7 +859,9 @@
       throw new IOException("Commit block with mismatching GS. NN has " +
           block + ", client submits " + commitBlock);
     }
-    block.commitBlock(commitBlock);
+    List<ReplicaUnderConstruction> staleReplicas =
+        block.commitBlock(commitBlock);
+    removeStaleReplicas(staleReplicas, block);
     return true;
   }
@@ -993,7 +995,8 @@
    * when tailing edit logs as a Standby.
    */
   public void forceCompleteBlock(final BlockInfo block) throws IOException {
-    block.commitBlock(block);
+    List<ReplicaUnderConstruction> staleReplicas = block.commitBlock(block);
+    removeStaleReplicas(staleReplicas, block);
     completeBlock(block, null, true);
   }
@@ -3638,6 +3641,16 @@
     }
   }
+  private void removeStaleReplicas(List<ReplicaUnderConstruction> staleReplicas,
+      BlockInfo block) {
+    for (ReplicaUnderConstruction r : staleReplicas) {
+      removeStoredBlock(block,
+          r.getExpectedStorageLocation().getDatanodeDescriptor());
+      NameNode.blockStateChangeLog
+          .debug("BLOCK* Removing stale replica {}" + " of {}", r,
+              Block.toString(r));
+    }
+  }
 
   /**
    * Get all valid locations of the block & add the block to results
    * @return the length of the added block; 0 if the block is not added. If the
@@ -4090,6 +4103,13 @@
         new Block(BlockIdManager.convertToStripedID(block.getBlockId())));
   }
 
+  public void updateLastBlock(BlockInfo lastBlock, ExtendedBlock newBlock) {
+    lastBlock.setNumBytes(newBlock.getNumBytes());
+    List<ReplicaUnderConstruction> staleReplicas = lastBlock
+        .setGenerationStampAndVerifyReplicas(newBlock.getGenerationStamp());
+    removeStaleReplicas(staleReplicas, lastBlock);
+  }
+
   /** updates a block in needed reconstruction queue. */
   private void updateNeededReconstructions(final BlockInfo block,
       final int curReplicasDelta, int expectedReplicasDelta) {
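Why this matters for fsck: before this patch, BlockInfo removed a stale replica by calling removeBlock directly on its DatanodeStorageInfo, which detached the replica from the storage but bypassed BlockManager's accounting, so a replica that had been marked corrupt could linger in the corrupt-replica tracking and fsck would keep reporting the file as CORRUPT. Routing the removal through removeStoredBlock keeps the indexes consistent. Below is a toy contrast of the two paths, assuming (as removeStoredBlock's role in BlockManager suggests) that manager-side removal also clears corrupt-replica state; the maps and names are hypothetical, not the real BlockManager structures.

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Toy contrast between the two removal paths (hypothetical structures).
// Detaching a replica from its storage alone leaves the corrupt-replica
// index stale; removing it through the manager clears every index that
// still references the replica.
public class ToyRemovalPaths {
  static final Map<Long, Set<String>> blockToStorages = new HashMap<>();
  static final Map<Long, Set<String>> corruptReplicas = new HashMap<>();

  // Old-path analogue: only the storage mapping is updated.
  static void detachOnly(long blockId, String storage) {
    blockToStorages.get(blockId).remove(storage);
  }

  // New-path analogue of removeStoredBlock: both indexes are updated, so
  // a later health report no longer counts a corrupt replica for the block.
  static void removeStored(long blockId, String storage) {
    detachOnly(blockId, storage);
    Set<String> corrupt = corruptReplicas.get(blockId);
    if (corrupt != null) {
      corrupt.remove(storage);
      if (corrupt.isEmpty()) {
        corruptReplicas.remove(blockId);
      }
    }
  }

  public static void main(String[] args) {
    blockToStorages.put(1L, new HashSet<>(Arrays.asList("dn1", "dn2")));
    corruptReplicas.put(1L, new HashSet<>(Arrays.asList("dn2")));
    removeStored(1L, "dn2");
    System.out.println("blocks still marked corrupt: " + corruptReplicas.size());
  }
}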

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -4962,8 +4962,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
 
     // Update old block with the new generation stamp and new length
-    lastBlock.setNumBytes(newBlock.getNumBytes());
-    lastBlock.setGenerationStampAndVerifyReplicas(newBlock.getGenerationStamp());
+    blockManager.updateLastBlock(lastBlock, newBlock);
 
     // find the DatanodeDescriptor objects
     final DatanodeStorageInfo[] storages = blockManager.getDatanodeManager()
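FSNamesystem now makes one call instead of two: updateLastBlock pairs the length/genstamp update with the stale-replica cleanup, so no call site can perform the first and skip the second. A small sketch of that encapsulation; the names here are hypothetical.

// Toy sketch of the encapsulation applied here (hypothetical names): the
// length/genstamp update and the stale-replica cleanup move behind one
// manager method, so no call site can do the first and skip the second.
public class ToyLastBlockUpdate {
  static class ToyBlockManager {
    // Analogue of BlockManager#updateLastBlock: a single entry point
    // that always pairs the metadata update with the cleanup.
    void updateLastBlock(long newLength, long newGenStamp) {
      System.out.println("set length=" + newLength + ", gs=" + newGenStamp);
      removeStaleReplicas(newGenStamp);
    }

    private void removeStaleReplicas(long genStamp) {
      System.out.println("removing replicas older than gs=" + genStamp);
    }
  }

  public static void main(String[] args) {
    // FSNamesystem's call site above now reduces to one call like this.
    new ToyBlockManager().updateLastBlock(1024L, 1002L);
  }
}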

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java

@@ -80,6 +80,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -107,6 +108,7 @@ import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.Result;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ReplicationResult;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ErasureCodingResult;
+import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
@@ -2424,4 +2426,34 @@ public class TestFsck {
       }
     }, 1000, 60000);
   }
+
+  @Test(timeout = 300000)
+  public void testFsckCorruptWhenOneReplicaIsCorrupt()
+      throws Exception {
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(2)
+        .build()) {
+      cluster.waitActive();
+      FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
+      cluster.transitionToActive(0);
+      String filePath = "/appendTest";
+      Path fileName = new Path(filePath);
+      DFSTestUtil.createFile(fs, fileName, 512, (short) 2, 0);
+      DFSTestUtil.waitReplication(fs, fileName, (short) 2);
+      Assert.assertTrue("File not created", fs.exists(fileName));
+      cluster.getDataNodes().get(1).shutdown();
+      DFSTestUtil.appendFile(fs, fileName, "appendCorruptBlock");
+      cluster.restartDataNode(1, true);
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override public Boolean get() {
+          return (
+              cluster.getNameNode(0).getNamesystem().getCorruptReplicaBlocks()
+                  > 0);
+        }
+      }, 100, 5000);
+      DFSTestUtil.appendFile(fs, fileName, "appendCorruptBlock");
+      runFsck(cluster.getConfiguration(0), 0, true, "/");
+    }
+  }
 }
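The test's final call, runFsck(cluster.getConfiguration(0), 0, true, "/"), expects fsck to exit with code 0, i.e. report the namespace HEALTHY, even though one datanode held a replica with a stale generation stamp that was marked corrupt during the appends. For manual verification outside the test harness, here is a minimal sketch of probing fsck programmatically with DFSck (the class the test already imports); it assumes a running HDFS whose configuration is on the classpath, and FsckProbe is a hypothetical name.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

// Minimal sketch: run fsck on "/" against a running HDFS and inspect the
// exit code. 0 means fsck reported the namespace HEALTHY; non-zero means
// it found problems such as CORRUPT, which is what this test guards against.
public class FsckProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(); // assumes HDFS configs on classpath
    int rc = ToolRunner.run(new DFSck(conf), new String[] { "/" });
    System.out.println("fsck exit code: " + rc);
  }
}

The same check from the shell is hdfs fsck /, which prints the health summary and returns a non-zero exit code when corruption is found.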