From d993d2203840bf98f773fa1dd6e19ec2931dedbd Mon Sep 17 00:00:00 2001
From: qinyuren <1476659627@qq.com>
Date: Tue, 19 Apr 2022 12:37:28 +0800
Subject: [PATCH] HDFS-16538. EC decoding failed due to not enough valid inputs (#4167)

Co-authored-by: liubingxing
(cherry picked from commit 52e152f8b0d5f522f3b799ea72c6c887d5d2c42d)
---
 .../hadoop/hdfs/StatefulStripeReader.java          |  4 +++-
 .../hdfs/TestReadStripedFileWithDecoding.java      | 18 ++++++++++++++++++
 2 files changed, 21 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java
index b37501d2e2b..98a6ba13b02 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java
@@ -52,7 +52,9 @@ class StatefulStripeReader extends StripeReader {
       cur = dfsStripedInputStream.getCurStripeBuf().duplicate();
     }
 
-    this.decodeInputs = new ECChunk[dataBlkNum + parityBlkNum];
+    if (this.decodeInputs == null) {
+      this.decodeInputs = new ECChunk[dataBlkNum + parityBlkNum];
+    }
     int bufLen = (int) alignedStripe.getSpanInBlock();
     int bufOff = (int) alignedStripe.getOffsetInBlock();
     for (int i = 0; i < dataBlkNum; i++) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
index 2fb9212f354..99ea6b687eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
@@ -43,6 +43,7 @@
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import static org.apache.hadoop.hdfs.ReadStripedFileWithDecodingHelper.BLOCK_SIZE;
 import static org.apache.hadoop.hdfs.ReadStripedFileWithDecodingHelper.CELL_SIZE;
 import static org.apache.hadoop.hdfs.ReadStripedFileWithDecodingHelper.NUM_DATA_UNITS;
 import static org.apache.hadoop.hdfs.ReadStripedFileWithDecodingHelper.NUM_PARITY_UNITS;
@@ -162,4 +163,21 @@ public class TestReadStripedFileWithDecoding {
       DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
     }
   }
+
+  @Test
+  public void testMoreThanOneCorruptedBlock() throws IOException {
+    final Path file = new Path("/corrupted");
+    final int length = BLOCK_SIZE * NUM_DATA_UNITS;
+    final byte[] bytes = StripedFileTestUtil.generateBytes(length);
+    DFSTestUtil.writeFile(dfs, file, bytes);
+
+    // read the file with more than one corrupted data block
+    byte[] buffer = new byte[length + 100];
+    for (int count = 2; count < NUM_PARITY_UNITS; ++count) {
+      ReadStripedFileWithDecodingHelper.corruptBlocks(cluster, dfs, file, count, 0,
+          false);
+      StripedFileTestUtil.verifyStatefulRead(dfs, file, length, bytes,
+          buffer);
+    }
+  }
 }