HDFS-15815. if required storageType are unavailable, log the failed reason during choosing Datanode. Contributed by Yang Yun. (#2882)

(cherry picked from commit e391844e8e)

Co-authored-by: Ayush Saxena <ayushsaxena@apache.org>
(cherry picked from commit bfba6f1f3c)
This commit is contained in:
Wei-Chiu Chuang 2021-04-12 23:55:32 -07:00
parent 0faa626bd8
commit e6df0fb84c
2 changed files with 23 additions and 3 deletions

View File

@@ -76,7 +76,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
NODE_STALE("the node is stale"),
NODE_TOO_BUSY("the node is too busy"),
TOO_MANY_NODES_ON_RACK("the rack has too many chosen nodes"),
NOT_ENOUGH_STORAGE_SPACE("not enough storage space to place the block");
NOT_ENOUGH_STORAGE_SPACE("not enough storage space to place the block"),
NO_REQUIRED_STORAGE_TYPE("required storage types are unavailable");
private final String text;
@@ -802,6 +803,9 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
includeType = type;
break;
}
logNodeIsNotChosen(null,
NodeNotChosenReason.NO_REQUIRED_STORAGE_TYPE,
" for storage type " + type);
}
} else {
chosenNode = chooseDataNode(scope, excludedNodes);
@@ -938,7 +942,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
if (LOG.isDebugEnabled()) {
// build the error message for later use.
debugLoggingBuilder.get()
.append("\n Datanode ").append(node)
.append("\n Datanode ").append((node==null)?"None":node)
.append(" is not chosen since ").append(reason.getText());
if (reasonDetails != null) {
debugLoggingBuilder.get().append(" ").append(reasonDetails);

View File

@@ -54,6 +54,7 @@ import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.StatefulBlockInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
@@ -1619,4 +1620,19 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
assertTrue(bppd.excludeNodeByLoad(node));
}
}
/**
 * Verifies that when no datanode offers the storage type required by the
 * placement policy (COLD policy demands ARCHIVE storage, which the test
 * cluster lacks), chooseTarget returns no targets AND logs the
 * NO_REQUIRED_STORAGE_TYPE reason introduced by HDFS-15815.
 */
@Test
public void testChosenFailureForStorageType() {
  // Capture log output so we can assert on the "not chosen" reason.
  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);
  try {
    // COLD policy requires ARCHIVE storage; none is configured, so
    // target selection must fail for every replica.
    DatanodeStorageInfo[] targets = replicator.chooseTarget(filename, 1,
        dataNodes[0], new ArrayList<DatanodeStorageInfo>(), false, null,
        BLOCK_SIZE, TestBlockStoragePolicy.POLICY_SUITE.getPolicy(
        HdfsConstants.COLD_STORAGE_POLICY_ID), null);
    assertEquals(0, targets.length);
    // The failure reason must be logged (the fix under test).
    assertNotEquals(0,
        appender.countLinesWithMessage("NO_REQUIRED_STORAGE_TYPE"));
  } finally {
    // Detach the appender so it does not capture output from — or leak
    // memory across — subsequent tests sharing this JVM's root logger.
    logger.removeAppender(appender);
  }
}
}