HDFS-5843. DFSClient.getFileChecksum() throws IOException if checksum is disabled. Contributed by Laurent Goujon.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1562927 13f79535-47bb-0310-9956-ffa450edef68
Author: Jing Zhao
Date: 2014-01-30 19:15:13 +00:00
commit 3d9ad8e3b6 (parent 95074f0867)
3 changed files with 17 additions and 4 deletions

File: hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -304,6 +304,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-5492. Port HDFS-2069 (Incorrect default trash interval in the
     docs) to trunk. (Akira Ajisaka via Arpit Agarwal)
 
+    HDFS-5843. DFSClient.getFileChecksum() throws IOException if checksum is
+    disabled. (Laurent Goujon via jing9)
+
 Release 2.3.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

File: hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java

@@ -655,8 +655,9 @@ public void blockChecksum(final ExtendedBlock block,
       final BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
       final DataChecksum checksum = header.getChecksum();
       final int bytesPerCRC = checksum.getBytesPerChecksum();
-      final long crcPerBlock = (metadataIn.getLength()
-          - BlockMetadataHeader.getHeaderSize())/checksum.getChecksumSize();
+      final long crcPerBlock = checksum.getChecksumSize() > 0
+          ? (metadataIn.getLength() - BlockMetadataHeader.getHeaderSize())/checksum.getChecksumSize()
+          : 0;
 
       //compute block checksum
       final MD5Hash md5 = MD5Hash.digest(checksumIn);
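
Why the guard matters: when a file is written with dfs.checksum.type set to NULL, DataChecksum.getChecksumSize() returns 0, and the old expression divided by it, which is what surfaced as the IOException in DFSClient.getFileChecksum(). Below is a minimal standalone sketch of the guarded computation, not the actual DataXceiver code; metaFileLength, headerSize, and checksumSize are placeholder parameters standing in for metadataIn.getLength(), BlockMetadataHeader.getHeaderSize(), and checksum.getChecksumSize():

// Standalone sketch of the crcPerBlock guard above, with placeholder
// names and illustrative numbers rather than the real DataXceiver fields.
public class CrcPerBlockSketch {
  static long crcPerBlock(long metaFileLength, int headerSize, int checksumSize) {
    // A NULL checksum reports size 0; without the guard, the
    // division below fails on the datanode.
    return checksumSize > 0
        ? (metaFileLength - headerSize) / checksumSize
        : 0;
  }

  public static void main(String[] args) {
    // 4-byte CRCs: (1031 - 7) / 4 = 256 checksum entries in the meta file
    System.out.println(crcPerBlock(1031, 7, 4));
    // NULL checksum: size 0, guard returns 0 instead of dividing by zero
    System.out.println(crcPerBlock(7, 7, 0));
  }
}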

File: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java

@@ -71,7 +71,7 @@ private void writeFile2(Path name) throws Exception {
     cleanupFile(name);
   }
 
-  /* create a file, write data with vairable amount of data */
+  /* create a file, write data with variable amount of data */
   private void writeFile3(Path name) throws Exception {
     FSDataOutputStream stm = fileSys.create(name, true,
         fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, 4096),
@@ -103,6 +103,8 @@ private void checkFile(Path name) throws Exception {
     stm.readFully(0, actual);
     checkAndEraseData(actual, 0, expected, "Read Sanity Test");
     stm.close();
+    // do a sanity check. Get the file checksum
+    fileSys.getFileChecksum(name);
   }
 
   private void cleanupFile(Path name) throws IOException {
@@ -112,13 +114,20 @@ private void cleanupFile(Path name) throws IOException {
   }
 
   /**
-   * Test write opeation for output stream in DFS.
+   * Test write operation for output stream in DFS.
    */
   @Test
   public void testFSOutputSummer() throws Exception {
+    doTestFSOutputSummer("CRC32");
+    doTestFSOutputSummer("CRC32C");
+    doTestFSOutputSummer("NULL");
+  }
+
+  private void doTestFSOutputSummer(String checksumType) throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_CHECKSUM);
+    conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, checksumType);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(NUM_OF_DATANODES)
         .build();
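
For context, here is a client-side sketch of the scenario the new NULL case exercises: write a file with checksums disabled, then ask for its checksum, which is exactly the call that used to fail. This is not part of the patch; it assumes a running HDFS cluster reachable through fs.defaultFS, and /tmp/null-checksum-demo is an arbitrary demo path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical demo class, not part of the commit: reproduces the
// HDFS-5843 scenario against a live cluster.
public class NullChecksumDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Same setting the test drives via DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY
    conf.set("dfs.checksum.type", "NULL");
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/null-checksum-demo"); // arbitrary path
    FSDataOutputStream out = fs.create(path, true);
    out.writeBytes("some data written without checksums");
    out.close();
    // Before the fix this call threw an IOException when checksums
    // were disabled; with the fix it completes.
    FileChecksum checksum = fs.getFileChecksum(path);
    System.out.println("checksum: " + checksum);
  }
}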