HDFS-14582. Failed to start DN with ArithmeticException when NULL checksum used. Contributed by Surendra Singh Lilhore.
Signed-off-by: Wei-Chiu Chuang <weichiu@apache.org>
commit 3a145e2918
parent 269b543367
BlockPoolSlice.java:

@@ -69,6 +69,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.DataChecksum.Type;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.ShutdownHookManager;
@@ -802,6 +803,10 @@ class BlockPoolSlice {
       // read and handle the common header here. For now just a version
       final DataChecksum checksum = BlockMetadataHeader.readDataChecksum(
           checksumIn, metaFile);
+      if (Type.NULL.equals(checksum.getChecksumType())) {
+        // in case of NULL checksum type consider full file as valid
+        return blockFileLen;
+      }
       int bytesPerChecksum = checksum.getBytesPerChecksum();
       int checksumSize = checksum.getChecksumSize();
       long numChunks = Math.min(
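The ArithmeticException in the commit title comes from the length computation that follows the last context line above: a NULL checksum carries no per-chunk digest, so checksum.getChecksumSize() returns 0 and the DataNode divides by zero while validating block files at startup. Below is a minimal sketch of the failure mode and the new guard; the class name, the simplified formula, and the header constant are illustrative, not the exact BlockPoolSlice code.

import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DataChecksum.Type;

// Sketch only: approximates how the valid block length is derived during
// startup validation; names and the header constant are illustrative.
class NullChecksumGuardSketch {
  static long validLength(DataChecksum checksum, long blockFileLen,
      long metaFileLen) {
    // The fix: a NULL checksum has nothing to verify, so the whole block
    // file is treated as valid and the division below is never reached.
    if (Type.NULL.equals(checksum.getChecksumType())) {
      return blockFileLen;
    }
    int bytesPerChecksum = checksum.getBytesPerChecksum();
    int checksumSize = checksum.getChecksumSize(); // 0 for Type.NULL
    long crcHeaderLen = 7;                         // illustrative meta header size
    // Without the guard, checksumSize == 0 makes this division throw
    // ArithmeticException: / by zero and the DataNode fails to start.
    long numChunks = Math.min(
        (blockFileLen + bytesPerChecksum - 1) / bytesPerChecksum,
        (metaFileLen - crcHeaderLen) / checksumSize);
    return Math.min(numChunks * bytesPerChecksum, blockFileLen);
  }
}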
TestDFSInputStream.java:

@@ -20,10 +20,13 @@ package org.apache.hadoop.hdfs;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 import static org.hamcrest.CoreMatchers.equalTo;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
@@ -31,6 +34,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
@@ -176,4 +180,43 @@ public class TestDFSInputStream {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testNullCheckSumWhenDNRestarted()
+      throws IOException, InterruptedException {
+    Configuration conf = new Configuration();
+    conf.set(HdfsClientConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
+        .build();
+    cluster.waitActive();
+    try {
+      DistributedFileSystem fs = cluster.getFileSystem();
+
+      int chunkSize = 512;
+      Random r = new Random(12345L);
+      byte[] data = new byte[chunkSize];
+      r.nextBytes(data);
+
+      Path file = new Path("/testfile");
+      try (FSDataOutputStream fout = fs.create(file)) {
+        fout.write(data);
+        fout.hflush();
+        cluster.restartDataNode(0, true, true);
+      }
+
+      // wait for block to load
+      Thread.sleep(1000);
+
+      // fetch live DN
+      final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+      cluster.getNameNode().getNamesystem().getBlockManager()
+          .getDatanodeManager().fetchDatanodes(live, null, false);
+      assertTrue("DN start should be success and live dn should be 2",
+          live.size() == 2);
+      assertTrue("File size should be " + chunkSize,
+          fs.getFileStatus(file).getLen() == chunkSize);
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
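The test above relies on the NULL checksum type reporting a zero checksum size. A quick standalone probe of the org.apache.hadoop.util.DataChecksum API shows this; the class name and main harness below are a throwaway example, not part of the patch.

import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DataChecksum.Type;

// Throwaway probe, not part of the patch: shows why Type.NULL would trip the
// divide-by-zero that the new guard in BlockPoolSlice avoids.
public class NullChecksumProbe {
  public static void main(String[] args) {
    DataChecksum nullSum = DataChecksum.newDataChecksum(Type.NULL, 512);
    System.out.println("type = " + nullSum.getChecksumType()); // NULL
    System.out.println("size = " + nullSum.getChecksumSize()); // 0
  }
}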