From c7c36cbd6218f46c33d7fb2f60cd52cb29e6d720 Mon Sep 17 00:00:00 2001
From: Kihwal Lee
Date: Thu, 15 Oct 2015 11:24:14 -0500
Subject: [PATCH] HDFS-9220. Reading small file (< 512 bytes) that is open
 for append fails due to incorrect checksum. Contributed by Jing Zhao.

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 ++
 .../hdfs/server/datanode/BlockReceiver.java  |  5 +--
 .../apache/hadoop/hdfs/TestFileAppend2.java  | 37 +++++++++++++++++++
 3 files changed, 42 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 674b090e97e..c7a2423948d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2129,6 +2129,9 @@ Release 2.7.2 - UNRELEASED
     HDFS-8676. Delayed rolling upgrade finalization can cause heartbeat
     expiration. (Walter Su via kihwal)
 
+    HDFS-9220. Reading small file (< 512 bytes) that is open for append fails
+    due to incorrect checksum (Jing Zhao via kihwal)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 4c40e83b04a..99cdbea6449 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -753,11 +753,10 @@ class BlockReceiver implements Closeable {
           final int offset = checksumBuf.arrayOffset() +
               checksumBuf.position() + skip;
           final int end = offset + checksumLen - skip;
-          // If offset > end, there is no more checksum to write.
+          // If offset >= end, there is no more checksum to write.
           // I.e. a partial chunk checksum rewrite happened and there is no
           // more to write after that.
-          if (offset > end) {
-            assert crcBytes != null;
+          if (offset >= end && doCrcRecalc) {
             lastCrc = crcBytes;
           } else {
             final int remainingBytes = checksumLen - skip;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
index 8a950272b9e..3c72db35bd2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
@@ -30,6 +30,7 @@ import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -549,4 +550,40 @@ public class TestFileAppend2 {
   public void testComplexAppend2() throws IOException {
     testComplexAppend(true);
   }
+
+  /**
+   * Make sure when the block length after appending is less than 512 bytes, the
+   * checksum re-calculation and overwrite are performed correctly.
+   */
+  @Test
+  public void testAppendLessThanChecksumChunk() throws Exception {
+    final byte[] buf = new byte[1024];
+    final MiniDFSCluster cluster = new MiniDFSCluster
+        .Builder(new HdfsConfiguration()).numDataNodes(1).build();
+    cluster.waitActive();
+
+    try (DistributedFileSystem fs = cluster.getFileSystem()) {
+      final int len1 = 200;
+      final int len2 = 300;
+      final Path p = new Path("/foo");
+
+      FSDataOutputStream out = fs.create(p);
+      out.write(buf, 0, len1);
+      out.close();
+
+      out = fs.append(p);
+      out.write(buf, 0, len2);
+      // flush but leave open
+      out.hflush();
+
+      // read data to verify the replica's content and checksum are correct
+      FSDataInputStream in = fs.open(p);
+      final int length = in.read(0, buf, 0, len1 + len2);
+      assertTrue(length > 0);
+      in.close();
+      out.close();
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }