HDFS-15815. If the required storage types are unavailable, log the failure reason while choosing a Datanode. Contributed by Yang Yun.
commit e391844e8e
parent 0ed9e36282
BlockPlacementPolicyDefault.java

@@ -81,7 +81,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     NODE_STALE("the node is stale"),
     NODE_TOO_BUSY("the node is too busy"),
     TOO_MANY_NODES_ON_RACK("the rack has too many chosen nodes"),
-    NOT_ENOUGH_STORAGE_SPACE("not enough storage space to place the block");
+    NOT_ENOUGH_STORAGE_SPACE("not enough storage space to place the block"),
+    NO_REQUIRED_STORAGE_TYPE("required storage types are unavailable");

     private final String text;
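For context, the NodeNotChosenReason constants above supply the text that later appears in the debug log; the new NO_REQUIRED_STORAGE_TYPE constant simply slots into the existing pattern. A minimal sketch of how the enum is assumed to be wired up (only the constants and the text field are visible in the hunk; the constructor and getter below are assumptions):

    private enum NodeNotChosenReason {
      // ... earlier constants elided ...
      NOT_ENOUGH_STORAGE_SPACE("not enough storage space to place the block"),
      NO_REQUIRED_STORAGE_TYPE("required storage types are unavailable");

      private final String text;

      // Assumed constructor/getter pairing: store the human-readable text
      // and expose it for building the "not chosen" debug message.
      NodeNotChosenReason(final String logText) {
        text = logText;
      }

      private String getText() {
        return text;
      }
    }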
@@ -822,6 +823,9 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
             includeType = type;
             break;
           }
+          logNodeIsNotChosen(null,
+              NodeNotChosenReason.NO_REQUIRED_STORAGE_TYPE,
+              " for storage type " + type);
         }
       } else {
         chosenNode = chooseDataNode(scope, excludedNodes);
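The new call sits inside the loop that tries each requested storage type in turn; when no datanode offering that type can be found, the failure is now recorded instead of passing silently. A rough sketch of the surrounding flow, assuming the usual chooseRandom() structure (the storageTypes map, the loop, and the chooseDataNode overload are context outside the diff):

    if (clusterMap instanceof DFSNetworkTopology) {
      for (StorageType type : storageTypes.keySet()) {
        chosenNode = chooseDataNode(scope, excludedNodes, type);
        if (chosenNode != null) {
          includeType = type;
          break;
        }
        // Added by this patch: no node satisfies this storage type, so note
        // the reason; the node argument is null because nothing was chosen.
        logNodeIsNotChosen(null,
            NodeNotChosenReason.NO_REQUIRED_STORAGE_TYPE,
            " for storage type " + type);
      }
    } else {
      chosenNode = chooseDataNode(scope, excludedNodes);
    }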
@@ -958,7 +962,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     if (LOG.isDebugEnabled()) {
       // build the error message for later use.
       debugLoggingBuilder.get()
-          .append("\n Datanode ").append(node)
+          .append("\n Datanode ").append((node==null)?"None":node)
           .append(" is not chosen since ").append(reason.getText());
       if (reasonDetails != null) {
         debugLoggingBuilder.get().append(" ").append(reasonDetails);
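Because the new caller passes null for the datanode, the debug-message builder must tolerate a missing node, and it now prints "None" in that case. A sketch of the helper after the change; the signature and the lines outside the hunk are assumed rather than taken from the diff:

    private void logNodeIsNotChosen(DatanodeDescriptor node,
        NodeNotChosenReason reason, String reasonDetails) {
      if (LOG.isDebugEnabled()) {
        // build the error message for later use.
        debugLoggingBuilder.get()
            .append("\n Datanode ").append((node == null) ? "None" : node)
            .append(" is not chosen since ").append(reason.getText());
        if (reasonDetails != null) {
          debugLoggingBuilder.get().append(" ").append(reasonDetails);
        }
        // Illustrative result for a COLD/ARCHIVE request:
        //   Datanode None is not chosen since required storage types are
        //   unavailable for storage type ARCHIVE
      }
    }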
TestReplicationPolicy.java

@@ -55,6 +55,7 @@ import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.StatefulBlockInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
@@ -1648,4 +1649,19 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {

     assertFalse(bppd.excludeNodeByLoad(node));
   }
+
+  @Test
+  public void testChosenFailureForStorageType() {
+    final LogVerificationAppender appender = new LogVerificationAppender();
+    final Logger logger = Logger.getRootLogger();
+    logger.addAppender(appender);
+
+    DatanodeStorageInfo[] targets = replicator.chooseTarget(filename, 1,
+        dataNodes[0], new ArrayList<DatanodeStorageInfo>(), false, null,
+        BLOCK_SIZE, TestBlockStoragePolicy.POLICY_SUITE.getPolicy(
+            HdfsConstants.StoragePolicy.COLD.value()), null);
+    assertEquals(0, targets.length);
+    assertNotEquals(0,
+        appender.countLinesWithMessage("NO_REQUIRED_STORAGE_TYPE"));
+  }
 }
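The test requests a single replica under the COLD storage policy; the datanodes in this fixture presumably expose only DISK storage, so no target can satisfy the required ARCHIVE type and chooseTarget returns an empty array. The second assertion then verifies that the reason actually reached the logs by scanning the captured output for the enum name NO_REQUIRED_STORAGE_TYPE. A sketch of what countLinesWithMessage is assumed to do, not copied from the real org.apache.hadoop.hdfs.LogVerificationAppender:

    // Assumed behavior: count captured log4j events whose rendered message
    // contains the given substring (capturedEvents is a hypothetical field
    // filled by the appender's append() callback).
    public int countLinesWithMessage(final String text) {
      int count = 0;
      for (LoggingEvent event : capturedEvents) {
        String message = event.getRenderedMessage();
        if (message != null && message.contains(text)) {
          count++;
        }
      }
      return count;
    }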