From 1f3e7374078f087d025ee252399b5b09383b0cd7 Mon Sep 17 00:00:00 2001
From: Vinayakumar B
Date: Tue, 12 Feb 2019 21:57:57 +0530
Subject: [PATCH] HDFS-14266. EC : Fsck -blockId shows null for EC Blocks if
 One Block Is Not Available. Contributed by Ayush Saxena.

---
 .../hdfs/server/namenode/NamenodeFsck.java    | 56 ++++++++++++-------
 .../blockmanagement/TestBlockInfoStriped.java | 45 +++++++++++++++
 2 files changed, 80 insertions(+), 21 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 0201ca11610..7e4709c2364 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -302,29 +302,22 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
       if (blockManager.getCorruptReplicas(block) != null) {
         corruptionRecord = blockManager.getCorruptReplicas(block);
       }
-
-      //report block replicas status on datanodes
-      for(int idx = (blockInfo.numNodes()-1); idx >= 0; idx--) {
-        DatanodeDescriptor dn = blockInfo.getDatanode(idx);
-        out.print("Block replica on datanode/rack: " + dn.getHostName() +
-            dn.getNetworkLocation() + " ");
-        if (corruptionRecord != null && corruptionRecord.contains(dn)) {
-          out.print(CORRUPT_STATUS + "\t ReasonCode: " +
-              blockManager.getCorruptReason(block, dn));
-        } else if (dn.isDecommissioned() ){
-          out.print(DECOMMISSIONED_STATUS);
-        } else if (dn.isDecommissionInProgress()) {
-          out.print(DECOMMISSIONING_STATUS);
-        } else if (this.showMaintenanceState && dn.isEnteringMaintenance()) {
-          out.print(ENTERING_MAINTENANCE_STATUS);
-        } else if (this.showMaintenanceState && dn.isInMaintenance()) {
-          out.print(IN_MAINTENANCE_STATUS);
-        } else {
-          out.print(HEALTHY_STATUS);
+      // report block replicas status on datanodes
+      if (blockInfo.isStriped()) {
+        for (int idx = (blockInfo.getCapacity() - 1); idx >= 0; idx--) {
+          DatanodeDescriptor dn = blockInfo.getDatanode(idx);
+          if (dn == null) {
+            continue;
+          }
+          printDatanodeReplicaStatus(block, corruptionRecord, dn);
+        }
+      } else {
+        for (int idx = (blockInfo.numNodes() - 1); idx >= 0; idx--) {
+          DatanodeDescriptor dn = blockInfo.getDatanode(idx);
+          printDatanodeReplicaStatus(block, corruptionRecord, dn);
         }
-        out.print("\n");
       }
-    } catch (Exception e){
+    } catch (Exception e) {
       String errMsg = "Fsck on blockId '" + blockId;
       LOG.warn(errMsg, e);
       out.println(e.getMessage());
@@ -335,6 +328,27 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
     }
   }
 
+  private void printDatanodeReplicaStatus(Block block,
+      Collection<DatanodeDescriptor> corruptionRecord, DatanodeDescriptor dn) {
+    out.print("Block replica on datanode/rack: " + dn.getHostName() +
+        dn.getNetworkLocation() + " ");
+    if (corruptionRecord != null && corruptionRecord.contains(dn)) {
+      out.print(CORRUPT_STATUS + "\t ReasonCode: " +
+          blockManager.getCorruptReason(block, dn));
+    } else if (dn.isDecommissioned()){
+      out.print(DECOMMISSIONED_STATUS);
+    } else if (dn.isDecommissionInProgress()) {
+      out.print(DECOMMISSIONING_STATUS);
+    } else if (this.showMaintenanceState && dn.isEnteringMaintenance()) {
+      out.print(ENTERING_MAINTENANCE_STATUS);
+    } else if (this.showMaintenanceState && dn.isInMaintenance()) {
+      out.print(IN_MAINTENANCE_STATUS);
+    } else {
+      out.print(HEALTHY_STATUS);
+    }
+    out.print("\n");
+  }
+
   /**
    * Check files on DFS, starting from the indicated path.
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
index becf8683331..0982c80e81b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
@@ -17,10 +17,18 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.tools.DFSck;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.ToolRunner;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
@@ -31,12 +39,15 @@ import org.mockito.internal.util.reflection.Whitebox;
 
 import java.io.DataOutput;
 import java.io.DataOutputStream;
+import java.io.File;
+import java.io.PrintStream;
 import java.io.ByteArrayOutputStream;
 import java.nio.ByteBuffer;
 import java.util.Collection;
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
 
 /**
@@ -212,6 +223,40 @@ public class TestBlockInfoStriped {
     }
   }
 
+  @Test
+  public void testGetBlockInfo() throws IllegalArgumentException, Exception {
+    int dataBlocks = testECPolicy.getNumDataUnits();
+    int parityBlocks = testECPolicy.getNumParityUnits();
+    int totalSize = dataBlocks + parityBlocks;
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    Configuration conf = new Configuration();
+    try (MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf, builderBaseDir).numDataNodes(totalSize)
+            .build()) {
+      DistributedFileSystem fs = cluster.getFileSystem();
+      fs.enableErasureCodingPolicy(
+          StripedFileTestUtil.getDefaultECPolicy().getName());
+      fs.enableErasureCodingPolicy(testECPolicy.getName());
+      fs.mkdirs(new Path("/ecDir"));
+      fs.setErasureCodingPolicy(new Path("/ecDir"), testECPolicy.getName());
+      DFSTestUtil.createFile(fs, new Path("/ecDir/ecFile"),
+          fs.getDefaultBlockSize() * dataBlocks, (short) 1, 1024);
+      ExtendedBlock blk = DFSTestUtil
+          .getAllBlocks(fs, new Path("/ecDir/ecFile")).get(0).getBlock();
+      String id = "blk_" + Long.toString(blk.getBlockId());
+      BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager()
+          .getStoredBlock(blk.getLocalBlock());
+      DatanodeStorageInfo[] dnStorageInfo = cluster.getNameNode()
+          .getNamesystem().getBlockManager().getStorages(bInfo);
+      bInfo.removeStorage(dnStorageInfo[1]);
+      ByteArrayOutputStream bStream = new ByteArrayOutputStream();
+      PrintStream out = new PrintStream(bStream, true);
+      assertEquals(0, ToolRunner.run(new DFSck(conf, out), new String[] {
+          new Path("/ecDir/ecFile").toString(), "-blockId", id }));
+      assertFalse(out.toString().contains("null"));
+    }
+  }
+
   @Test
   public void testWrite() {
     long blkID = 1;