HDFS-15815. If the required storage types are unavailable, log the failure reason when choosing a Datanode. Contributed by Yang Yun. (#2882)

(cherry picked from commit e391844e8e)

Co-authored-by: Ayush Saxena <ayushsaxena@apache.org>
This commit is contained in:
Wei-Chiu Chuang 2021-04-12 23:55:32 -07:00 committed by GitHub
parent 77315abe47
commit bfba6f1f3c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 23 additions and 3 deletions

View File

@ -81,7 +81,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
NODE_STALE("the node is stale"),
NODE_TOO_BUSY("the node is too busy"),
TOO_MANY_NODES_ON_RACK("the rack has too many chosen nodes"),
NOT_ENOUGH_STORAGE_SPACE("not enough storage space to place the block");
NOT_ENOUGH_STORAGE_SPACE("not enough storage space to place the block"),
NO_REQUIRED_STORAGE_TYPE("required storage types are unavailable");
private final String text;
@ -822,6 +823,9 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
includeType = type;
break;
}
logNodeIsNotChosen(null,
NodeNotChosenReason.NO_REQUIRED_STORAGE_TYPE,
" for storage type " + type);
}
} else {
chosenNode = chooseDataNode(scope, excludedNodes);
@ -958,7 +962,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
if (LOG.isDebugEnabled()) {
// build the error message for later use.
debugLoggingBuilder.get()
.append("\n Datanode ").append(node)
.append("\n Datanode ").append((node==null)?"None":node)
.append(" is not chosen since ").append(reason.getText());
if (reasonDetails != null) {
debugLoggingBuilder.get().append(" ").append(reasonDetails);

View File

@ -55,6 +55,7 @@ import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.StatefulBlockInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
@ -1648,4 +1649,19 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
assertFalse(bppd.excludeNodeByLoad(node));
}
@Test
public void testChosenFailureForStorageType() {
  // Verify that when placement cannot satisfy the required storage type
  // (COLD policy — presumably the test cluster's datanodes lack the
  // matching storage; confirmed indirectly by the expected log line),
  // no target is chosen and the NO_REQUIRED_STORAGE_TYPE reason is logged.
  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);
  try {
    DatanodeStorageInfo[] targets = replicator.chooseTarget(filename, 1,
        dataNodes[0], new ArrayList<DatanodeStorageInfo>(), false, null,
        BLOCK_SIZE, TestBlockStoragePolicy.POLICY_SUITE.getPolicy(
        HdfsConstants.COLD_STORAGE_POLICY_ID), null);
    // Placement must fail outright (no suitable storage anywhere)...
    assertEquals(0, targets.length);
    // ...and the failure reason must have been emitted to the log.
    assertNotEquals(0,
        appender.countLinesWithMessage("NO_REQUIRED_STORAGE_TYPE"));
  } finally {
    // Detach the appender so it does not keep capturing (and buffering)
    // log output from every test that runs after this one.
    logger.removeAppender(appender);
  }
}
}