HDFS-14582. Failed to start DN with ArithmeticException when NULL checksum used. Contributed by Surendra Singh Lilhore.

Signed-off-by: Wei-Chiu Chuang <weichiu@apache.org>
(cherry picked from commit f95988113da3f06f6d975f99f1ee51d88a793537)
(cherry picked from commit 03c62c7989)
commit c2aeeb01fa (parent 3547261749)
Author: Surendra Singh Lilhore
Date:   2019-08-20 15:53:53 -07:00
Committed by: Wei-Chiu Chuang
2 changed files with 48 additions and 0 deletions

File: BlockPoolSlice.java

@@ -67,6 +67,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.DataChecksum.Type;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.ShutdownHookManager;
@@ -796,6 +797,10 @@ class BlockPoolSlice {
       // read and handle the common header here. For now just a version
       final DataChecksum checksum = BlockMetadataHeader.readDataChecksum(
           checksumIn, metaFile);
+      if (Type.NULL.equals(checksum.getChecksumType())) {
+        // in case of NULL checksum type consider full file as valid
+        return blockFileLen;
+      }
       int bytesPerChecksum = checksum.getBytesPerChecksum();
       int checksumSize = checksum.getChecksumSize();
       long numChunks = Math.min(
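Note on the guard added above: DataChecksum.Type.NULL reports a checksum size of 0, so the chunk-count arithmetic that follows it (dividing the meta file length by the checksum size) is what failed with ArithmeticException during DataNode startup. The following is a minimal standalone sketch of that failure mode, not the actual BlockPoolSlice code; the file lengths are made-up values:

import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DataChecksum.Type;

public class NullChecksumSketch {
  public static void main(String[] args) {
    // Type.NULL means "no checksum": its per-chunk checksum size is 0.
    DataChecksum checksum = DataChecksum.newDataChecksum(Type.NULL, 512);
    int bytesPerChecksum = checksum.getBytesPerChecksum(); // 512
    int checksumSize = checksum.getChecksumSize();         // 0 for Type.NULL

    long blockFileLen = 1024; // hypothetical block file length on disk
    long metaFileLen = 7;     // hypothetical meta file length (header only)

    // Guard mirroring the fix: with a NULL checksum, treat the whole block
    // file as valid and skip the per-chunk arithmetic entirely.
    if (Type.NULL.equals(checksum.getChecksumType())) {
      System.out.println("NULL checksum, validated length = " + blockFileLen);
      return;
    }

    // Without the guard, this kind of expression divides by zero and the
    // DataNode fails to start.
    long numChunks = Math.min(blockFileLen / bytesPerChecksum,
        metaFileLen / checksumSize);
    System.out.println("numChunks = " + numChunks);
  }
}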

File: TestDFSInputStream.java

@@ -20,10 +20,13 @@ package org.apache.hadoop.hdfs;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 import static org.hamcrest.CoreMatchers.equalTo;
 import java.io.File;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Random;
 import org.apache.hadoop.conf.Configuration;
@@ -31,6 +34,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
@@ -176,4 +180,43 @@ public class TestDFSInputStream {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testNullCheckSumWhenDNRestarted()
+      throws IOException, InterruptedException {
+    Configuration conf = new Configuration();
+    conf.set(HdfsClientConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
+        .build();
+    cluster.waitActive();
+    try {
+      DistributedFileSystem fs = cluster.getFileSystem();
+      int chunkSize = 512;
+      Random r = new Random(12345L);
+      byte[] data = new byte[chunkSize];
+      r.nextBytes(data);
+      Path file = new Path("/testfile");
+      try (FSDataOutputStream fout = fs.create(file)) {
+        fout.write(data);
+        fout.hflush();
+        cluster.restartDataNode(0, true, true);
+      }
+      // wait for block to load
+      Thread.sleep(1000);
+      // fetch live DN
+      final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+      cluster.getNameNode().getNamesystem().getBlockManager()
+          .getDatanodeManager().fetchDatanodes(live, null, false);
+      assertTrue("DN start should be success and live dn should be 2",
+          live.size() == 2);
+      assertTrue("File size should be " + chunkSize,
+          fs.getFileStatus(file).getLen() == chunkSize);
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
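For reference, the NULL checksum type exercised by the test is selected through the client configuration key held in HdfsClientConfigKeys.DFS_CHECKSUM_TYPE_KEY ("dfs.checksum.type"); data written this way carries no CRC bytes, which is what the pre-fix validation tripped over when the DataNode restarted. A hedged usage sketch outside MiniDFSCluster, with an illustrative output path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

public class WriteWithNullChecksum {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Ask the client to write blocks without CRC data.
    conf.set(HdfsClientConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
    try (FileSystem fs = FileSystem.get(conf);
        FSDataOutputStream out = fs.create(new Path("/tmp/null-checksum-file"))) {
      out.writeBytes("data written with a NULL checksum");
    }
  }
}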