diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
index e1c8ae3d243..87589c5e323 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
@@ -63,7 +63,6 @@ public class TestListCorruptFileBlocks {
   @Test (timeout=300000)
   public void testListCorruptFilesCorruptedBlock() throws Exception {
     MiniDFSCluster cluster = null;
-    Random random = new Random();
 
     try {
       Configuration conf = new HdfsConfiguration();
@@ -74,10 +73,13 @@ public class TestListCorruptFileBlocks {
       cluster = new MiniDFSCluster.Builder(conf).build();
       FileSystem fs = cluster.getFileSystem();
 
+      // Files are corrupted with 2 bytes before the end of the file,
+      // so that's the minimum length.
+      final int corruptionLength = 2;
       // create two files with one block each
       DFSTestUtil util = new DFSTestUtil.Builder().
           setName("testCorruptFilesCorruptedBlock").setNumFiles(2).
-          setMaxLevels(1).setMaxSize(512).build();
+          setMaxLevels(1).setMinSize(corruptionLength).setMaxSize(512).build();
       util.createFiles(fs, "/srcdat10");
 
       // fetch bad file list from namenode. There should be none.
@@ -98,14 +100,13 @@ public class TestListCorruptFileBlocks {
       File metaFile = metaFiles.get(0);
       RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
       FileChannel channel = file.getChannel();
-      long position = channel.size() - 2;
-      int length = 2;
-      byte[] buffer = new byte[length];
-      random.nextBytes(buffer);
+      long position = channel.size() - corruptionLength;
+      byte[] buffer = new byte[corruptionLength];
+      new Random(13L).nextBytes(buffer);
       channel.write(ByteBuffer.wrap(buffer), position);
       file.close();
       LOG.info("Deliberately corrupting file " + metaFile.getName() +
-          " at offset " + position + " length " + length);
+          " at offset " + position + " length " + corruptionLength);
 
       // read all files to trigger detection of corrupted replica
       try {
@@ -134,7 +135,6 @@ public class TestListCorruptFileBlocks {
   @Test (timeout=300000)
   public void testListCorruptFileBlocksInSafeMode() throws Exception {
     MiniDFSCluster cluster = null;
-    Random random = new Random();
 
     try {
       Configuration conf = new HdfsConfiguration();
@@ -155,10 +155,13 @@ public class TestListCorruptFileBlocks {
           HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
       FileSystem fs = cluster.getFileSystem();
 
+      // Files are corrupted with 2 bytes before the end of the file,
+      // so that's the minimum length.
+      final int corruptionLength = 2;
       // create two files with one block each
       DFSTestUtil util = new DFSTestUtil.Builder().
           setName("testListCorruptFileBlocksInSafeMode").setNumFiles(2).
-          setMaxLevels(1).setMaxSize(512).build();
+          setMaxLevels(1).setMinSize(corruptionLength).setMaxSize(512).build();
       util.createFiles(fs, "/srcdat10");
 
       // fetch bad file list from namenode. There should be none.
@@ -178,14 +181,13 @@ public class TestListCorruptFileBlocks {
       File metaFile = metaFiles.get(0);
       RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
       FileChannel channel = file.getChannel();
-      long position = channel.size() - 2;
-      int length = 2;
-      byte[] buffer = new byte[length];
-      random.nextBytes(buffer);
+      long position = channel.size() - corruptionLength;
+      byte[] buffer = new byte[corruptionLength];
+      new Random(13L).nextBytes(buffer);
       channel.write(ByteBuffer.wrap(buffer), position);
       file.close();
       LOG.info("Deliberately corrupting file " + metaFile.getName() +
-          " at offset " + position + " length " + length);
+          " at offset " + position + " length " + corruptionLength);
 
       // read all files to trigger detection of corrupted replica
       try {
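
The hunks above all apply the same change: the shared unseeded Random is replaced by a locally constructed Random with a fixed seed (13L), so the bytes written over the end of the meta file are identical on every run, and setMinSize(corruptionLength) guarantees the file is long enough that channel.size() - corruptionLength is never negative. Isolated as a standalone helper, the pattern looks roughly like the sketch below; this is illustrative only, not code from the patch, and the class and method names (CorruptTailSketch, corruptTail) are hypothetical.

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.Random;

public class CorruptTailSketch {
  // Overwrites the last corruptionLength bytes of target with bytes from
  // a fixed-seed Random, so every run writes the same "garbage" and a
  // failure can be reproduced exactly. The caller must ensure the file is
  // at least corruptionLength bytes long, which is what
  // setMinSize(corruptionLength) guarantees in the patch above.
  static void corruptTail(File target, int corruptionLength)
      throws IOException {
    try (RandomAccessFile file = new RandomAccessFile(target, "rw")) {
      FileChannel channel = file.getChannel();
      long position = channel.size() - corruptionLength;
      byte[] buffer = new byte[corruptionLength];
      new Random(13L).nextBytes(buffer); // fixed seed, as in the patch
      channel.write(ByteBuffer.wrap(buffer), position);
    }
  }
}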