HDFS-14101. Random failure of testListCorruptFilesCorruptedBlock. Contributed by Zsolt Venczel, Nikhil Navadiya.

Signed-off-by: Wei-Chiu Chuang <weichiu@apache.org>
Co-authored-by: Nikhil Navadia <nikhil.navadiya@cloudera.com>
(cherry picked from commit 7c00756aff)
(cherry picked from commit 4ea3b04bf3)
(cherry picked from commit 76faa41f18)
This commit is contained in:
Zsolt Venczel 2019-06-18 14:36:19 -07:00 committed by Wei-Chiu Chuang
parent abd11f9db1
commit a68de43957
1 changed file with 16 additions and 14 deletions

View File

@@ -63,7 +63,6 @@ public class TestListCorruptFileBlocks {
@Test (timeout=300000) @Test (timeout=300000)
public void testListCorruptFilesCorruptedBlock() throws Exception { public void testListCorruptFilesCorruptedBlock() throws Exception {
MiniDFSCluster cluster = null; MiniDFSCluster cluster = null;
Random random = new Random();
try { try {
Configuration conf = new HdfsConfiguration(); Configuration conf = new HdfsConfiguration();
@@ -74,10 +73,13 @@ public class TestListCorruptFileBlocks {
cluster = new MiniDFSCluster.Builder(conf).build(); cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem(); FileSystem fs = cluster.getFileSystem();
// Files are corrupted with 2 bytes before the end of the file,
// so that's the minimum length.
final int corruptionLength = 2;
// create two files with one block each // create two files with one block each
DFSTestUtil util = new DFSTestUtil.Builder(). DFSTestUtil util = new DFSTestUtil.Builder().
setName("testCorruptFilesCorruptedBlock").setNumFiles(2). setName("testCorruptFilesCorruptedBlock").setNumFiles(2).
setMaxLevels(1).setMaxSize(512).build(); setMaxLevels(1).setMinSize(corruptionLength).setMaxSize(512).build();
util.createFiles(fs, "/srcdat10"); util.createFiles(fs, "/srcdat10");
// fetch bad file list from namenode. There should be none. // fetch bad file list from namenode. There should be none.
@@ -98,14 +100,13 @@ public class TestListCorruptFileBlocks {
File metaFile = metaFiles.get(0); File metaFile = metaFiles.get(0);
RandomAccessFile file = new RandomAccessFile(metaFile, "rw"); RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
FileChannel channel = file.getChannel(); FileChannel channel = file.getChannel();
long position = channel.size() - 2; long position = channel.size() - corruptionLength;
int length = 2; byte[] buffer = new byte[corruptionLength];
byte[] buffer = new byte[length]; new Random(13L).nextBytes(buffer);
random.nextBytes(buffer);
channel.write(ByteBuffer.wrap(buffer), position); channel.write(ByteBuffer.wrap(buffer), position);
file.close(); file.close();
LOG.info("Deliberately corrupting file " + metaFile.getName() + LOG.info("Deliberately corrupting file " + metaFile.getName() +
" at offset " + position + " length " + length); " at offset " + position + " length " + corruptionLength);
// read all files to trigger detection of corrupted replica // read all files to trigger detection of corrupted replica
try { try {
@@ -134,7 +135,6 @@ public class TestListCorruptFileBlocks {
@Test (timeout=300000) @Test (timeout=300000)
public void testListCorruptFileBlocksInSafeMode() throws Exception { public void testListCorruptFileBlocksInSafeMode() throws Exception {
MiniDFSCluster cluster = null; MiniDFSCluster cluster = null;
Random random = new Random();
try { try {
Configuration conf = new HdfsConfiguration(); Configuration conf = new HdfsConfiguration();
@@ -155,10 +155,13 @@ public class TestListCorruptFileBlocks {
HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false); HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
FileSystem fs = cluster.getFileSystem(); FileSystem fs = cluster.getFileSystem();
// Files are corrupted with 2 bytes before the end of the file,
// so that's the minimum length.
final int corruptionLength = 2;
// create two files with one block each // create two files with one block each
DFSTestUtil util = new DFSTestUtil.Builder(). DFSTestUtil util = new DFSTestUtil.Builder().
setName("testListCorruptFileBlocksInSafeMode").setNumFiles(2). setName("testListCorruptFileBlocksInSafeMode").setNumFiles(2).
setMaxLevels(1).setMaxSize(512).build(); setMaxLevels(1).setMinSize(corruptionLength).setMaxSize(512).build();
util.createFiles(fs, "/srcdat10"); util.createFiles(fs, "/srcdat10");
// fetch bad file list from namenode. There should be none. // fetch bad file list from namenode. There should be none.
@@ -178,14 +181,13 @@ public class TestListCorruptFileBlocks {
File metaFile = metaFiles.get(0); File metaFile = metaFiles.get(0);
RandomAccessFile file = new RandomAccessFile(metaFile, "rw"); RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
FileChannel channel = file.getChannel(); FileChannel channel = file.getChannel();
long position = channel.size() - 2; long position = channel.size() - corruptionLength;
int length = 2; byte[] buffer = new byte[corruptionLength];
byte[] buffer = new byte[length]; new Random(13L).nextBytes(buffer);
random.nextBytes(buffer);
channel.write(ByteBuffer.wrap(buffer), position); channel.write(ByteBuffer.wrap(buffer), position);
file.close(); file.close();
LOG.info("Deliberately corrupting file " + metaFile.getName() + LOG.info("Deliberately corrupting file " + metaFile.getName() +
" at offset " + position + " length " + length); " at offset " + position + " length " + corruptionLength);
// read all files to trigger detection of corrupted replica // read all files to trigger detection of corrupted replica
try { try {