From 0ef1a13f019c22a99300e6a683b0d1af5193663c Mon Sep 17 00:00:00 2001
From: daimin
Date: Thu, 14 Apr 2022 11:07:06 +0800
Subject: [PATCH] HDFS-16509. Fix decommission UnsupportedOperationException
 (#4077). Contributed by daimin.

(cherry picked from commit c65c383b7ebef48c638607f15ba35d61554982cb)
---
 .../DatanodeAdminDefaultMonitor.java          |  6 ++++--
 .../apache/hadoop/hdfs/TestDecommission.java  | 17 +++++++++++++++++
 2 files changed, 21 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java
index 2da3de07147..3f7be83496c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java
@@ -390,8 +390,10 @@ public class DatanodeAdminDefaultMonitor extends DatanodeAdminMonitorBase
       // Remove the block from the list if it's no longer in the block map,
       // e.g. the containing file has been deleted
       if (blockManager.blocksMap.getStoredBlock(block) == null) {
-        LOG.trace("Removing unknown block {}", block);
-        it.remove();
+        if (pruneReliableBlocks) {
+          LOG.trace("Removing unknown block {}", block);
+          it.remove();
+        }
         continue;
       }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index 9592a23c510..89e5cabc880 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
@@ -65,6 +66,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -672,6 +674,21 @@ public class TestDecommission extends AdminStatesBaseTest {
     fdos.close();
   }
 
+  @Test(timeout = 20000)
+  public void testDecommissionWithUnknownBlock() throws IOException {
+    startCluster(1, 3);
+
+    FSNamesystem ns = getCluster().getNamesystem(0);
+    DatanodeManager datanodeManager = ns.getBlockManager().getDatanodeManager();
+
+    BlockInfo blk = new BlockInfoContiguous(new Block(1L), (short) 1);
+    DatanodeDescriptor dn = datanodeManager.getDatanodes().iterator().next();
+    dn.getStorageInfos()[0].addBlock(blk, blk);
+
+    datanodeManager.getDatanodeAdminManager().startDecommission(dn);
+    waitNodeState(dn, DatanodeInfo.AdminStates.DECOMMISSIONED);
+  }
+
   private static String scanIntoString(final ByteArrayOutputStream baos) {
     final TextStringBuilder sb = new TextStringBuilder();
     final Scanner scanner = new Scanner(baos.toString());
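
Note on why the guard works: DatanodeAdminDefaultMonitor walks blocks on two paths. When it rescans the list of insufficiently replicated blocks (pruneReliableBlocks == true), the iterator is backed by a modifiable list, so it.remove() succeeds. When it scans a DataNode's own block iterator during the initial check (pruneReliableBlocks == false), that iterator does not support removal, so the previously unconditional it.remove() threw UnsupportedOperationException as soon as decommissioning met a block no longer present in the block map — the situation the new test creates by adding a block to the DataNode's storage without registering it in the block map. The following standalone sketch (illustrative class and variable names, not from the Hadoop tree) reproduces the same failure mode with a plain java.util iterator:

import java.util.Iterator;
import java.util.List;

public class ReadOnlyIteratorDemo {
  public static void main(String[] args) {
    // Stand-in for the read-only block iterator on the
    // pruneReliableBlocks == false path. List.of() returns an
    // immutable list whose iterator rejects remove().
    List<Long> blockIds = List.of(1L, 2L, 3L);
    Iterator<Long> it = blockIds.iterator();
    while (it.hasNext()) {
      long id = it.next();
      if (id == 2L) {
        // What the monitor did unconditionally before the patch:
        it.remove(); // throws java.lang.UnsupportedOperationException
      }
    }
  }
}

With the guard in place, the read-only path simply falls through to continue and skips the unknown block; actual removal happens only on the pruning path, where the underlying collection is modifiable.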