HDFS-14101. Random failure of testListCorruptFilesCorruptedBlock. Contributed by Zsolt Venczel, Nikhil Navadia.

Signed-off-by: Wei-Chiu Chuang <weichiu@apache.org>
Co-authored-by: Nikhil Navadia <nikhil.navadiya@cloudera.com>
Zsolt Venczel 2019-06-18 14:36:19 -07:00 committed by Wei-Chiu Chuang
parent 1a99f97945
commit 7c00756aff
1 changed file with 16 additions and 14 deletions
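The patch makes the corruption step deterministic in two ways: the two overwritten bytes now come from a Random seeded with a fixed value (13L) instead of an unseeded one, and the generated test files are forced to be at least corruptionLength bytes long. Below is a minimal standalone sketch of the resulting corruption step, assuming only plain JDK I/O; the class name, temp file, and main harness are illustrative and not part of the patch.

import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.Random;

public class CorruptTailDemo {

  // Overwrite the last corruptionLength bytes of a file with bytes from a
  // seeded Random, mirroring the pattern the patch adopts: the fixed seed
  // means every run flips the same, known bytes.
  static void corruptTail(Path path, int corruptionLength) throws Exception {
    try (RandomAccessFile file = new RandomAccessFile(path.toFile(), "rw");
         FileChannel channel = file.getChannel()) {
      // Valid only when the file is at least corruptionLength bytes long,
      // which the patch enforces in the tests via setMinSize(corruptionLength).
      long position = channel.size() - corruptionLength;
      byte[] buffer = new byte[corruptionLength];
      new Random(13L).nextBytes(buffer);
      channel.write(ByteBuffer.wrap(buffer), position);
    }
  }

  public static void main(String[] args) throws Exception {
    Path tmp = Files.createTempFile("meta", ".tmp");  // stand-in for a replica meta file
    Files.write(tmp, new byte[] {1, 2, 3, 4});
    corruptTail(tmp, 2);
    System.out.println(Arrays.toString(Files.readAllBytes(tmp)));
    Files.delete(tmp);
  }
}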

@@ -63,7 +63,6 @@ public class TestListCorruptFileBlocks {
   @Test (timeout=300000)
   public void testListCorruptFilesCorruptedBlock() throws Exception {
     MiniDFSCluster cluster = null;
-    Random random = new Random();
     try {
       Configuration conf = new HdfsConfiguration();
@@ -74,10 +73,13 @@ public class TestListCorruptFileBlocks {
       cluster = new MiniDFSCluster.Builder(conf).build();
       FileSystem fs = cluster.getFileSystem();
+      // Files are corrupted with 2 bytes before the end of the file,
+      // so that's the minimum length.
+      final int corruptionLength = 2;
       // create two files with one block each
       DFSTestUtil util = new DFSTestUtil.Builder().
           setName("testCorruptFilesCorruptedBlock").setNumFiles(2).
-          setMaxLevels(1).setMaxSize(512).build();
+          setMaxLevels(1).setMinSize(corruptionLength).setMaxSize(512).build();
       util.createFiles(fs, "/srcdat10");
       // fetch bad file list from namenode. There should be none.
@@ -98,14 +100,13 @@ public class TestListCorruptFileBlocks {
       File metaFile = metaFiles.get(0);
       RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
       FileChannel channel = file.getChannel();
-      long position = channel.size() - 2;
-      int length = 2;
-      byte[] buffer = new byte[length];
-      random.nextBytes(buffer);
+      long position = channel.size() - corruptionLength;
+      byte[] buffer = new byte[corruptionLength];
+      new Random(13L).nextBytes(buffer);
       channel.write(ByteBuffer.wrap(buffer), position);
       file.close();
       LOG.info("Deliberately corrupting file " + metaFile.getName() +
-          " at offset " + position + " length " + length);
+          " at offset " + position + " length " + corruptionLength);
       // read all files to trigger detection of corrupted replica
       try {
@@ -134,7 +135,6 @@ public class TestListCorruptFileBlocks {
   @Test (timeout=300000)
   public void testListCorruptFileBlocksInSafeMode() throws Exception {
     MiniDFSCluster cluster = null;
-    Random random = new Random();
     try {
       Configuration conf = new HdfsConfiguration();
@@ -155,10 +155,13 @@ public class TestListCorruptFileBlocks {
           HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
       FileSystem fs = cluster.getFileSystem();
+      // Files are corrupted with 2 bytes before the end of the file,
+      // so that's the minimum length.
+      final int corruptionLength = 2;
       // create two files with one block each
       DFSTestUtil util = new DFSTestUtil.Builder().
           setName("testListCorruptFileBlocksInSafeMode").setNumFiles(2).
-          setMaxLevels(1).setMaxSize(512).build();
+          setMaxLevels(1).setMinSize(corruptionLength).setMaxSize(512).build();
       util.createFiles(fs, "/srcdat10");
       // fetch bad file list from namenode. There should be none.
@@ -178,14 +181,13 @@ public class TestListCorruptFileBlocks {
       File metaFile = metaFiles.get(0);
       RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
       FileChannel channel = file.getChannel();
-      long position = channel.size() - 2;
-      int length = 2;
-      byte[] buffer = new byte[length];
-      random.nextBytes(buffer);
+      long position = channel.size() - corruptionLength;
+      byte[] buffer = new byte[corruptionLength];
+      new Random(13L).nextBytes(buffer);
       channel.write(ByteBuffer.wrap(buffer), position);
       file.close();
       LOG.info("Deliberately corrupting file " + metaFile.getName() +
-          " at offset " + position + " length " + length);
+          " at offset " + position + " length " + corruptionLength);
       // read all files to trigger detection of corrupted replica
       try {
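Taken together, the two changes in each test address the apparent sources of flakiness: an unseeded Random has a small but nonzero chance of writing back exactly the bytes already at the end of the meta file, leaving the replica effectively uncorrupted and the bad-file list empty; and, per the new comment in the patch, a generated file shorter than two bytes would make channel.size() - corruptionLength point before the start of the file. The fixed seed makes the overwritten bytes reproducible across runs, and setMinSize(corruptionLength) guarantees there is always a tail to overwrite.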