HDFS-5843. Merge change r1562927 from trunk.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1562929 13f79535-47bb-0310-9956-ffa450edef68
parent 28b178f55e
commit 4553487c52
@@ -18,6 +18,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-5492. Port HDFS-2069 (Incorrect default trash interval in the
     docs) to trunk. (Akira Ajisaka via Arpit Agarwal)
 
+    HDFS-5843. DFSClient.getFileChecksum() throws IOException if checksum is
+    disabled. (Laurent Goujon via jing9)
+
 Release 2.3.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
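
The CHANGES.txt entry above describes the client-visible symptom: with checksums disabled, DFSClient.getFileChecksum() failed instead of returning. A minimal repro sketch follows; it is not part of this commit, and it assumes a 2.x client classpath with fs.defaultFS pointing at a running HDFS cluster. The class name and path are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Hypothetical repro sketch for HDFS-5843, not part of this commit.
public class GetFileChecksumRepro {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // "NULL" disables block checksums (dfs.checksum.type).
    conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
    FileSystem fs = FileSystem.get(conf);
    Path file = new Path("/tmp/checksum-disabled-file");  // hypothetical path
    try (FSDataOutputStream out = fs.create(file, true)) {
      out.writeBytes("sample data");
    }
    // Before this fix the call below surfaced the datanode-side failure as an
    // IOException; with the fix it completes and returns whatever checksum applies.
    FileChecksum checksum = fs.getFileChecksum(file);
    System.out.println("checksum = " + checksum);
  }
}
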
@@ -655,8 +655,9 @@ class DataXceiver extends Receiver implements Runnable {
       final BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
       final DataChecksum checksum = header.getChecksum();
       final int bytesPerCRC = checksum.getBytesPerChecksum();
-      final long crcPerBlock = (metadataIn.getLength()
-          - BlockMetadataHeader.getHeaderSize())/checksum.getChecksumSize();
+      final long crcPerBlock = checksum.getChecksumSize() > 0
+          ? (metadataIn.getLength() - BlockMetadataHeader.getHeaderSize())/checksum.getChecksumSize()
+          : 0;
 
       //compute block checksum
       final MD5Hash md5 = MD5Hash.digest(checksumIn);
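
The change above guards the per-block CRC count against a zero checksum size: when dfs.checksum.type is NULL, getChecksumSize() returns 0, so the old unconditional division failed on the datanode with a divide-by-zero, which the client then saw as an IOException from getFileChecksum(). A standalone sketch of the guard follows; it is illustrative only, with a hypothetical class name and made-up numbers, not the actual DataXceiver code.

// Illustrative sketch of the HDFS-5843 guard; names and numbers are hypothetical.
public class CrcPerBlockSketch {
  // Mirrors the guarded expression: number of CRCs stored in the block metadata
  // file, or 0 when checksums are disabled (checksumSize == 0).
  static long crcPerBlock(long metadataLength, int headerSize, int checksumSize) {
    return checksumSize > 0
        ? (metadataLength - headerSize) / checksumSize
        : 0;
  }

  public static void main(String[] args) {
    // CRC32/CRC32C-style case: 4-byte checksums after an illustrative header.
    System.out.println(crcPerBlock(4103, 7, 4));  // -> 1024
    // NULL checksum case: size 0; the old code divided by zero here.
    System.out.println(crcPerBlock(7, 7, 0));     // -> 0
  }
}
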
@@ -71,7 +71,7 @@ public class TestFSOutputSummer {
     cleanupFile(name);
   }
 
-  /* create a file, write data with vairable amount of data */
+  /* create a file, write data with variable amount of data */
   private void writeFile3(Path name) throws Exception {
     FSDataOutputStream stm = fileSys.create(name, true,
         fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, 4096),
@@ -103,6 +103,8 @@ public class TestFSOutputSummer {
     stm.readFully(0, actual);
     checkAndEraseData(actual, 0, expected, "Read Sanity Test");
     stm.close();
+    // do a sanity check. Get the file checksum
+    fileSys.getFileChecksum(name);
   }
 
   private void cleanupFile(Path name) throws IOException {
@@ -112,13 +114,20 @@ public class TestFSOutputSummer {
   }
 
   /**
-   * Test write opeation for output stream in DFS.
+   * Test write operation for output stream in DFS.
    */
   @Test
   public void testFSOutputSummer() throws Exception {
+    doTestFSOutputSummer("CRC32");
+    doTestFSOutputSummer("CRC32C");
+    doTestFSOutputSummer("NULL");
+  }
+
+  private void doTestFSOutputSummer(String checksumType) throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_CHECKSUM);
+    conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, checksumType);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                                .numDataNodes(NUM_OF_DATANODES)
                                                .build();
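
The test now runs the same write/read sequence once per checksum type, and the NULL run exercises the code path fixed above, since it is the type whose per-checksum size is 0. The sketch below shows that relationship; it assumes the public org.apache.hadoop.util.DataChecksum API of the 2.x line and uses a hypothetical class name.

import org.apache.hadoop.util.DataChecksum;

// Illustrative sketch (hypothetical class): print the per-checksum size for the
// three types the test covers. NULL reports 0, the case the DataXceiver guard handles.
public class ChecksumSizeSketch {
  public static void main(String[] args) {
    DataChecksum.Type[] types = {
        DataChecksum.Type.CRC32, DataChecksum.Type.CRC32C, DataChecksum.Type.NULL };
    for (DataChecksum.Type type : types) {
      DataChecksum checksum = DataChecksum.newDataChecksum(type, 512);
      System.out.println(type + " -> checksum size " + checksum.getChecksumSize() + " bytes");
    }
  }
}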