HDFS-8133. Improve readability of deleted block check (Daryn Sharp via Colin P. McCabe)

This commit is contained in:
Colin Patrick Mccabe 2015-04-21 11:41:22 -07:00
parent 424a00daa0
commit 997408eaac
6 changed files with 30 additions and 13 deletions

View File

@ -452,6 +452,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8169. Move LocatedBlocks and related classes to hdfs-client. (wheat9) HDFS-8169. Move LocatedBlocks and related classes to hdfs-client. (wheat9)
HDFS-8133. Improve readability of deleted block check (Daryn Sharp via
Colin P. McCabe)
OPTIMIZATIONS OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

View File

@ -86,6 +86,10 @@ public void setBlockCollection(BlockCollection bc) {
this.bc = bc; this.bc = bc;
} }
/**
 * Whether this block has been deleted. A block is considered deleted once
 * it is detached from its {@code BlockCollection} (set to null), i.e. the
 * file that owned it no longer references it.
 *
 * @return true if this block no longer belongs to any file
 */
public boolean isDeleted() {
  return bc == null;
}
public DatanodeDescriptor getDatanode(int index) { public DatanodeDescriptor getDatanode(int index) {
DatanodeStorageInfo storage = getStorageInfo(index); DatanodeStorageInfo storage = getStorageInfo(index);
return storage == null ? null : storage.getDatanodeDescriptor(); return storage == null ? null : storage.getDatanodeDescriptor();

View File

@ -1165,13 +1165,14 @@ private void markBlockAsCorrupt(BlockToMarkCorrupt b,
DatanodeStorageInfo storageInfo, DatanodeStorageInfo storageInfo,
DatanodeDescriptor node) throws IOException { DatanodeDescriptor node) throws IOException {
BlockCollection bc = b.corrupted.getBlockCollection(); if (b.corrupted.isDeleted()) {
if (bc == null) {
blockLog.info("BLOCK markBlockAsCorrupt: {} cannot be marked as" + blockLog.info("BLOCK markBlockAsCorrupt: {} cannot be marked as" +
" corrupt as it does not belong to any file", b); " corrupt as it does not belong to any file", b);
addToInvalidates(b.corrupted, node); addToInvalidates(b.corrupted, node);
return; return;
} }
short expectedReplicas =
b.corrupted.getBlockCollection().getBlockReplication();
// Add replica to the data-node if it is not already there // Add replica to the data-node if it is not already there
if (storageInfo != null) { if (storageInfo != null) {
@ -1183,13 +1184,13 @@ private void markBlockAsCorrupt(BlockToMarkCorrupt b,
b.reasonCode); b.reasonCode);
NumberReplicas numberOfReplicas = countNodes(b.stored); NumberReplicas numberOfReplicas = countNodes(b.stored);
boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >= bc boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >=
.getBlockReplication(); expectedReplicas;
boolean minReplicationSatisfied = boolean minReplicationSatisfied =
numberOfReplicas.liveReplicas() >= minReplication; numberOfReplicas.liveReplicas() >= minReplication;
boolean hasMoreCorruptReplicas = minReplicationSatisfied && boolean hasMoreCorruptReplicas = minReplicationSatisfied &&
(numberOfReplicas.liveReplicas() + numberOfReplicas.corruptReplicas()) > (numberOfReplicas.liveReplicas() + numberOfReplicas.corruptReplicas()) >
bc.getBlockReplication(); expectedReplicas;
boolean corruptedDuringWrite = minReplicationSatisfied && boolean corruptedDuringWrite = minReplicationSatisfied &&
(b.stored.getGenerationStamp() > b.corrupted.getGenerationStamp()); (b.stored.getGenerationStamp() > b.corrupted.getGenerationStamp());
// case 1: have enough number of live replicas // case 1: have enough number of live replicas
@ -2509,7 +2510,7 @@ private Block addStoredBlock(final BlockInfoContiguous block,
} else { } else {
storedBlock = block; storedBlock = block;
} }
if (storedBlock == null || storedBlock.getBlockCollection() == null) { if (storedBlock == null || storedBlock.isDeleted()) {
// If this block does not belong to anyfile, then we are done. // If this block does not belong to anyfile, then we are done.
blockLog.info("BLOCK* addStoredBlock: {} on {} size {} but it does not" + blockLog.info("BLOCK* addStoredBlock: {} on {} size {} but it does not" +
" belong to any file", block, node, block.getNumBytes()); " belong to any file", block, node, block.getNumBytes());
@ -2794,8 +2795,7 @@ public double getReplicationQueuesInitProgress() {
* what happened with it. * what happened with it.
*/ */
private MisReplicationResult processMisReplicatedBlock(BlockInfoContiguous block) { private MisReplicationResult processMisReplicatedBlock(BlockInfoContiguous block) {
BlockCollection bc = block.getBlockCollection(); if (block.isDeleted()) {
if (bc == null) {
// block does not belong to any file // block does not belong to any file
addToInvalidates(block); addToInvalidates(block);
return MisReplicationResult.INVALID; return MisReplicationResult.INVALID;
@ -2806,7 +2806,8 @@ private MisReplicationResult processMisReplicatedBlock(BlockInfoContiguous block
return MisReplicationResult.UNDER_CONSTRUCTION; return MisReplicationResult.UNDER_CONSTRUCTION;
} }
// calculate current replication // calculate current replication
short expectedReplication = bc.getBlockReplication(); short expectedReplication =
block.getBlockCollection().getBlockReplication();
NumberReplicas num = countNodes(block); NumberReplicas num = countNodes(block);
int numCurrentReplica = num.liveReplicas(); int numCurrentReplica = num.liveReplicas();
// add to under-replicated queue if need to be // add to under-replicated queue if need to be

View File

@ -193,7 +193,7 @@ boolean removeNode(Block b, DatanodeDescriptor node) {
boolean removed = node.removeBlock(info); boolean removed = node.removeBlock(info);
if (info.getDatanode(0) == null // no datanodes left if (info.getDatanode(0) == null // no datanodes left
&& info.getBlockCollection() == null) { // does not belong to a file && info.isDeleted()) { // does not belong to a file
blocks.remove(b); // remove block from the map blocks.remove(b); // remove block from the map
} }
return removed; return removed;

View File

@ -4260,13 +4260,12 @@ void commitBlockSynchronization(ExtendedBlock oldBlock,
// this method to add a CloseOp to the edit log for an already deleted // this method to add a CloseOp to the edit log for an already deleted
// file (See HDFS-6825). // file (See HDFS-6825).
// //
BlockCollection blockCollection = storedBlock.getBlockCollection(); if (storedBlock.isDeleted()) {
if (blockCollection == null) {
throw new IOException("The blockCollection of " + storedBlock throw new IOException("The blockCollection of " + storedBlock
+ " is null, likely because the file owning this block was" + " is null, likely because the file owning this block was"
+ " deleted and the block removal is delayed"); + " deleted and the block removal is delayed");
} }
INodeFile iFile = ((INode)blockCollection).asFile(); INodeFile iFile = ((INode)storedBlock.getBlockCollection()).asFile();
if (isFileDeleted(iFile)) { if (isFileDeleted(iFile)) {
throw new FileNotFoundException("File not found: " throw new FileNotFoundException("File not found: "
+ iFile.getFullPathName() + ", likely due to delayed block" + iFile.getFullPathName() + ", likely due to delayed block"

View File

@ -34,6 +34,7 @@
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.junit.Assert; import org.junit.Assert;
import org.junit.Test; import org.junit.Test;
import org.mockito.Mockito;
/** /**
* This class provides tests for BlockInfo class, which is used in BlocksMap. * This class provides tests for BlockInfo class, which is used in BlocksMap.
@ -46,6 +47,15 @@ public class TestBlockInfo {
private static final Log LOG = LogFactory private static final Log LOG = LogFactory
.getLog("org.apache.hadoop.hdfs.TestBlockInfo"); .getLog("org.apache.hadoop.hdfs.TestBlockInfo");
/**
 * Verify that {@code BlockInfoContiguous#isDeleted()} mirrors the block's
 * collection attachment: attached to a collection means live, detached
 * (null collection) means deleted.
 */
@Test
public void testIsDeleted() {
  BlockInfoContiguous info = new BlockInfoContiguous((short) 3);
  BlockCollection collection = Mockito.mock(BlockCollection.class);
  // While the block belongs to a collection it must not report deleted.
  info.setBlockCollection(collection);
  Assert.assertFalse(info.isDeleted());
  // Detaching the collection marks the block as deleted.
  info.setBlockCollection(null);
  Assert.assertTrue(info.isDeleted());
}
@Test @Test
public void testAddStorage() throws Exception { public void testAddStorage() throws Exception {