HDFS-9705. Refine the behaviour of getFileChecksum when length = 0. Contributed by SammiChen and Kai Zheng.
(cherry picked from commit 82b4a9c3d0)
parent 5e751f3367
commit 7b8f87eb78
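
For orientation, a minimal client-side sketch of the refined behaviour (this sketch is not part of the commit; the path and cluster setup are hypothetical). After this patch, asking for a checksum over the first 0 bytes of an existing file yields the zero-byte "magic" checksum instead of falling through to null:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ZeroLengthChecksumSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);   // assumes fs.defaultFS points at HDFS
    Path file = new Path("/tmp/some-file"); // hypothetical existing file

    // Checksum over the first 0 bytes. With this change it matches the
    // checksum of a zero-byte file, e.g.
    // MD5-of-0MD5-of-0CRC32:70bc8f4b72a86921468bf8e8441dce51
    FileChecksum checksumWith0 = fs.getFileChecksum(file, 0);
    System.out.println(checksumWith0);
  }
}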
@@ -1950,12 +1950,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
       // If there is no block allocated for the file,
       // return one with the magic entry that matches what previous
       // hdfs versions return.
-      if (locatedblocks.size() == 0) {
+      if (locatedblocks.size() == 0 || length == 0) {
         return new MD5MD5CRC32GzipFileChecksum(0, 0, fileMD5);
       }

-      // we should never get here since the validity was checked
-      // when getCrcType() was called above.
+      // We will get here if above condition is not met.
       return null;
     }
   }
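
A note on the "magic entry" above: constructing MD5MD5CRC32GzipFileChecksum with bytesPerCRC = 0 and crcPerBlock = 0 is what yields the "MD5-of-0MD5-of-0CRC32" algorithm name asserted in the tests below. A standalone sketch, not from this commit (the MD5 argument here is a stand-in; DFSClient derives fileMD5 from its per-block digest buffer):

import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
import org.apache.hadoop.io.MD5Hash;

public class MagicChecksumNameSketch {
  public static void main(String[] args) {
    MD5Hash placeholderMd5 = MD5Hash.digest(new byte[0]); // stand-in MD5
    MD5MD5CRC32GzipFileChecksum magic =
        new MD5MD5CRC32GzipFileChecksum(0, 0, placeholderMd5);
    System.out.println(magic); // prints MD5-of-0MD5-of-0CRC32:<md5 hex>
  }
}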
@@ -816,13 +816,6 @@ public class TestDistributedFileSystem {
         out.close();
       }
-
-      // verify the magic val for zero byte files
-      {
-        final FileChecksum zeroChecksum = hdfs.getFileChecksum(zeroByteFile);
-        assertEquals(zeroChecksum.toString(),
-            "MD5-of-0MD5-of-0CRC32:70bc8f4b72a86921468bf8e8441dce51");
-      }

       //write another file
       final Path bar = new Path(dir, "bar" + n);
       {
@@ -831,8 +824,37 @@ public class TestDistributedFileSystem {
         out.write(data);
         out.close();
       }

-      { //verify checksum
+      {
+        // verify the magic val for zero byte file
+        final FileChecksum zeroChecksum = hdfs.getFileChecksum(zeroByteFile);
+        final String magicValue =
+            "MD5-of-0MD5-of-0CRC32:70bc8f4b72a86921468bf8e8441dce51";
+        // verify the magic val for zero byte files
+        assertEquals(magicValue, zeroChecksum.toString());
+
+        // verify checksum for empty file and 0 request length
+        final FileChecksum checksumWith0 = hdfs.getFileChecksum(bar, 0);
+        assertEquals(zeroChecksum, checksumWith0);
+
+        // verify none existent file
+        try {
+          hdfs.getFileChecksum(new Path(dir, "none-existent"), 8);
+          fail();
+        } catch (Exception ioe) {
+          FileSystem.LOG.info("GOOD: getting an exception", ioe);
+        }
+
+        // verify none existent file and 0 request length
+        try {
+          final FileChecksum noneExistentChecksumWith0 =
+              hdfs.getFileChecksum(new Path(dir, "none-existent"), 0);
+          fail();
+        } catch (Exception ioe) {
+          FileSystem.LOG.info("GOOD: getting an exception", ioe);
+        }
+
+        // verify checksums
         final FileChecksum barcs = hdfs.getFileChecksum(bar);
         final int barhashcode = barcs.hashCode();
         assertEquals(hdfsfoocs.hashCode(), barhashcode);
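
The two try/fail/catch blocks above assert that a checksum request on a missing file throws no matter what length is requested. As an aside, not part of this commit: where org.apache.hadoop.test.LambdaTestUtils is on the test classpath, the same assertion can be written more compactly (assuming the lookup surfaces a FileNotFoundException):

import java.io.FileNotFoundException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.LambdaTestUtils;

public class InterceptSketch {
  // Equivalent to the try { ...; fail(); } catch blocks above.
  static void expectNotFound(FileSystem hdfs, Path dir) throws Exception {
    LambdaTestUtils.intercept(FileNotFoundException.class,
        () -> hdfs.getFileChecksum(new Path(dir, "none-existent"), 0));
  }
}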
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;

 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;

 import java.io.IOException;
@@ -27,6 +28,7 @@ import java.util.Random;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.junit.Test;
@@ -104,7 +106,11 @@ public class TestFSOutputSummer {
     checkAndEraseData(actual, 0, expected, "Read Sanity Test");
     stm.close();
     // do a sanity check. Get the file checksum
-    fileSys.getFileChecksum(name);
+    FileChecksum fileChecksum = fileSys.getFileChecksum(name);
+    if (fileSys.getConf().get(
+        DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY).toString().equals("NULL")) {
+      assertNull(fileChecksum);
+    }
   }

   private void cleanupFile(Path name) throws IOException {
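
The new assertNull branch above only fires when data checksums are disabled. A small sketch, not from this commit, of the configuration that takes that path ("dfs.checksum.type" is the key behind DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY; valid values include CRC32, CRC32C and NULL):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class NullChecksumTypeSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // With type NULL, DFSClient cannot build an MD5-of-CRC checksum and
    // getFileChecksum(...) returns null, hence the assertNull above.
    conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
    System.out.println(conf.get(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY));
  }
}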