diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8acc75bd914..17d0ce3d7a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1881,6 +1881,9 @@ Release 0.23.5 - UNRELEASED
     HDFS-3224. Bug in check for DN re-registration with different storage ID
     (jlowe)
 
+    HDFS-4090. getFileChecksum() result incompatible when called against
+    zero-byte files. (Kihwal Lee via daryn)
+
 Release 0.23.4 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index d29ab1d0a89..3331b3a27a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1769,6 +1769,13 @@ public class DFSClient implements java.io.Closeable {
         return new MD5MD5CRC32CastagnoliFileChecksum(bytesPerCRC,
             crcPerBlock, fileMD5);
       default:
+        // If there is no block allocated for the file,
+        // return one with the magic entry that matches what previous
+        // hdfs versions return.
+        if (locatedblocks.size() == 0) {
+          return new MD5MD5CRC32GzipFileChecksum(0, 0, fileMD5);
+        }
+
         // we should never get here since the validity was checked
         // when getCrcType() was called above.
         return null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 9d2edc65832..0aec4960554 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -542,6 +542,21 @@ public class TestDistributedFileSystem {
       final FileChecksum webhdfs_qfoocs = webhdfs.getFileChecksum(webhdfsqualified);
       System.out.println("webhdfs_qfoocs=" + webhdfs_qfoocs);
 
+      //create a zero byte file
+      final Path zeroByteFile = new Path(dir, "zeroByteFile" + n);
+      {
+        final FSDataOutputStream out = hdfs.create(zeroByteFile, false, buffer_size,
+            (short)2, block_size);
+        out.close();
+      }
+
+      // verify the magic val for zero byte files
+      {
+        final FileChecksum zeroChecksum = hdfs.getFileChecksum(zeroByteFile);
+        assertEquals(zeroChecksum.toString(),
+            "MD5-of-0MD5-of-0CRC32:70bc8f4b72a86921468bf8e8441dce51");
+      }
+
       //write another file
       final Path bar = new Path(dir, "bar" + n);
       {
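
Note on the asserted magic value (not part of the patch): the string checked in TestDistributedFileSystem comes straight from the fallback added to DFSClient above. When a file has no located blocks, no per-block MD5s are ever written, so fileMD5 is computed over the still-empty digest buffer and wrapped as an MD5MD5CRC32GzipFileChecksum with bytesPerCRC and crcPerBlock both 0, which is the same string older HDFS releases returned. The sketch below reproduces that value without a cluster; the class name ZeroByteChecksumDemo is hypothetical, the Hadoop classes are the ones used or implied by the hunks above, and it assumes hadoop-common is on the classpath and that DFSClient digests its md5out buffer via MD5Hash.digest(getData()).

// Standalone sketch, not part of the patch: reproduce the "magic" zero-byte
// file checksum outside a MiniDFSCluster.
import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.MD5Hash;

public class ZeroByteChecksumDemo {
  public static void main(String[] args) {
    // For a file with no blocks nothing is written to the per-block MD5
    // buffer, so the file-level MD5 is taken over the buffer's zero-filled
    // initial backing array (assumption: mirrors DFSClient's md5out handling).
    final DataOutputBuffer md5out = new DataOutputBuffer();
    final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData());

    // The fallback added to DFSClient: bytesPerCRC = 0, crcPerBlock = 0.
    final MD5MD5CRC32GzipFileChecksum zeroChecksum =
        new MD5MD5CRC32GzipFileChecksum(0, 0, fileMD5);

    // Expected output, matching the assertion in TestDistributedFileSystem:
    // MD5-of-0MD5-of-0CRC32:70bc8f4b72a86921468bf8e8441dce51
    System.out.println(zeroChecksum);
  }
}

Using the Gzip (CRC32) checksum variant here, even when the cluster default is CRC32C, is what keeps the string form of a zero-byte file's checksum identical across versions, so cross-version comparisons (for example during distcp) continue to match.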