HDFS-16509. Fix decommission UnsupportedOperationException (#4077). Contributed by daimin.

commit c65c383b7e
parent 2efab92959
Author: daimin
Date:   2022-04-14 11:07:06 +08:00 (committed by GitHub)
2 changed files with 21 additions and 2 deletions
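
Summary of the change, as read from the diff: while scanning the blocks of a decommissioning DataNode, DatanodeAdminDefaultMonitor removed any block that is no longer in the blocksMap (e.g. because its containing file was deleted) via an unconditional it.remove(). The scan runs both with and without pruneReliableBlocks, and on the non-pruning path the monitor is evidently handed an iterator that does not support removal, so hitting such a stale block threw UnsupportedOperationException. The fix removes the block only when pruning and otherwise just skips it; a regression test decommissions a node holding a block unknown to the blocksMap.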

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java

@@ -390,8 +390,10 @@ public class DatanodeAdminDefaultMonitor extends DatanodeAdminMonitorBase
       // Remove the block from the list if it's no longer in the block map,
       // e.g. the containing file has been deleted
       if (blockManager.blocksMap.getStoredBlock(block) == null) {
-        LOG.trace("Removing unknown block {}", block);
-        it.remove();
+        if (pruneReliableBlocks) {
+          LOG.trace("Removing unknown block {}", block);
+          it.remove();
+        }
         continue;
       }
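
Why the guard fixes the crash: java.util.Iterator.remove() is an optional operation, and an iterator that does not support it throws UnsupportedOperationException. The loop above runs both with and without pruneReliableBlocks set, and only the pruning path iterates a collection whose iterator supports removal, so the old unconditional it.remove() could blow up on the other path. Below is a minimal standalone sketch of the failure mode and of the guard pattern; it is plain Java, not HDFS code, and the Arrays.asList iterator merely stands in for any iterator without removal support.

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class IteratorRemoveSketch {
  public static void main(String[] args) {
    // Arrays.asList is backed by a fixed-size array, so its iterator
    // does not support element removal.
    List<String> blocks = Arrays.asList("blk_1", "blk_2");
    Iterator<String> it = blocks.iterator();
    it.next();
    try {
      it.remove(); // same call shape as the formerly unguarded it.remove()
    } catch (UnsupportedOperationException e) {
      System.out.println("caught: " + e);
    }

    // The fix's pattern: only remove when the iterator is known to
    // support it (modeled here by a hypothetical stand-in flag),
    // otherwise skip the element without mutating the collection.
    boolean pruneReliableBlocks = false;
    Iterator<String> it2 = blocks.iterator();
    while (it2.hasNext()) {
      it2.next();
      if (pruneReliableBlocks) {
        it2.remove();
      }
    }
  }
}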

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java

@@ -54,6 +54,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
@@ -64,6 +65,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -673,6 +675,21 @@ public class TestDecommission extends AdminStatesBaseTest {
     fdos.close();
   }
 
+  @Test(timeout = 20000)
+  public void testDecommissionWithUnknownBlock() throws IOException {
+    startCluster(1, 3);
+
+    FSNamesystem ns = getCluster().getNamesystem(0);
+    DatanodeManager datanodeManager = ns.getBlockManager().getDatanodeManager();
+
+    BlockInfo blk = new BlockInfoContiguous(new Block(1L), (short) 1);
+    DatanodeDescriptor dn = datanodeManager.getDatanodes().iterator().next();
+    dn.getStorageInfos()[0].addBlock(blk, blk);
+
+    datanodeManager.getDatanodeAdminManager().startDecommission(dn);
+    waitNodeState(dn, DatanodeInfo.AdminStates.DECOMMISSIONED);
+  }
+
   private static String scanIntoString(final ByteArrayOutputStream baos) {
     final TextStringBuilder sb = new TextStringBuilder();
     final Scanner scanner = new Scanner(baos.toString());
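
The regression test builds a BlockInfo that exists only in the DataNode's storage and was never registered in the NameNode's blocksMap, so the admin monitor takes the getStoredBlock(block) == null branch while scanning the decommissioning node. With the guard in place the stale block is simply skipped on the non-pruning path, and the node proceeds to the DECOMMISSIONED state instead of the monitor failing with UnsupportedOperationException.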