diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9560b392636..4ac6c597d1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -140,6 +140,9 @@ Trunk (Unreleased)
     HDFS-2127. Add a test that ensure AccessControlExceptions contain
     a full path. (Stephen Chu via eli)
 
+    HDFS-3995. Use DFSTestUtil.createFile() for file creation and
+    writing in test cases. (Jing Zhao via suresh)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 4d72c3d72f1..6f862a32981 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -210,27 +210,40 @@ public static String readFile(FileSystem fs, Path fileName) throws IOException {
   public static void createFile(FileSystem fs, Path fileName, long fileLen,
       short replFactor, long seed) throws IOException {
+    createFile(fs, fileName, 1024, fileLen, fs.getDefaultBlockSize(fileName),
+        replFactor, seed);
+  }
+
+  public static void createFile(FileSystem fs, Path fileName, int bufferLen,
+      long fileLen, long blockSize, short replFactor, long seed)
+      throws IOException {
+    assert bufferLen > 0;
     if (!fs.mkdirs(fileName.getParent())) {
       throw new IOException("Mkdirs failed to create " +
                             fileName.getParent().toString());
     }
     FSDataOutputStream out = null;
     try {
-      out = fs.create(fileName, replFactor);
-      byte[] toWrite = new byte[1024];
-      Random rb = new Random(seed);
-      long bytesToWrite = fileLen;
-      while (bytesToWrite>0) {
-        rb.nextBytes(toWrite);
-        int bytesToWriteNext = (1024 < bytesToWrite) ? 1024 : (int) bytesToWrite;
-        out.write(toWrite, 0, bytesToWriteNext);
-        bytesToWrite -= bytesToWriteNext;
+      out = fs.create(fileName, true, fs.getConf()
+          .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+          replFactor, blockSize);
+      if (fileLen > 0) {
+        byte[] toWrite = new byte[bufferLen];
+        Random rb = new Random(seed);
+        long bytesToWrite = fileLen;
+        while (bytesToWrite>0) {
+          rb.nextBytes(toWrite);
+          int bytesToWriteNext = (bufferLen < bytesToWrite) ?
bufferLen + : (int) bytesToWrite; + + out.write(toWrite, 0, bytesToWriteNext); + bytesToWrite -= bytesToWriteNext; + } } - out.close(); - out = null; } finally { - IOUtils.closeStream(out); + if (out != null) { + out.close(); + } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java index 92ac17afdd5..77ea9c5907e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java @@ -37,7 +37,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.DatanodeID; @@ -141,13 +140,6 @@ private void sendRecvData(String testDescription, } } - void createFile(FileSystem fs, Path path, int fileLen) throws IOException { - byte [] arr = new byte[fileLen]; - FSDataOutputStream out = fs.create(path); - out.write(arr); - out.close(); - } - void readFile(FileSystem fs, Path path, int fileLen) throws IOException { byte [] arr = new byte[fileLen]; FSDataInputStream in = fs.open(path); @@ -357,7 +349,9 @@ public void testDataTransferProtocol() throws IOException { int fileLen = Math.min(conf.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096), 4096); - createFile(fileSys, file, fileLen); + DFSTestUtil.createFile(fileSys, file, fileLen, fileLen, + fileSys.getDefaultBlockSize(file), + fileSys.getDefaultReplication(file), 0L); // get the first blockid for the file final ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fileSys, file); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java index 28c3e9cb373..0e0d33cda3b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java @@ -79,7 +79,8 @@ public static void testSetUp() throws Exception { hftpfs = cluster.getHftpFileSystem(0); dfsClient = new DFSClient(NameNode.getAddress(conf), conf); file1 = new Path("filestatus.dat"); - writeFile(fs, file1, 1, fileSize, blockSize); + DFSTestUtil.createFile(fs, file1, fileSize, fileSize, blockSize, (short) 1, + seed); } @AfterClass @@ -87,18 +88,6 @@ public static void testTearDown() throws Exception { fs.close(); cluster.shutdown(); } - - private static void writeFile(FileSystem fileSys, Path name, int repl, - int fileSize, int blockSize) throws IOException { - // Create and write a file that contains three blocks of data - FSDataOutputStream stm = fileSys.create(name, true, - HdfsConstants.IO_FILE_BUFFER_SIZE, (short)repl, (long)blockSize); - byte[] buffer = new byte[fileSize]; - Random rand = new Random(seed); - rand.nextBytes(buffer); - stm.write(buffer); - stm.close(); - } private void checkFile(FileSystem fileSys, Path name, int repl) throws IOException, InterruptedException, TimeoutException { @@ -218,7 +207,8 @@ public void testGetFileStatusOnDir() throws Exception { // create another file that is smaller than a block. 
Path file2 = new Path(dir, "filestatus2.dat"); - writeFile(fs, file2, 1, blockSize/4, blockSize); + DFSTestUtil.createFile(fs, file2, blockSize/4, blockSize/4, blockSize, + (short) 1, seed); checkFile(fs, file2, 1); // verify file attributes @@ -230,7 +220,8 @@ public void testGetFileStatusOnDir() throws Exception { // Create another file in the same directory Path file3 = new Path(dir, "filestatus3.dat"); - writeFile(fs, file3, 1, blockSize/4, blockSize); + DFSTestUtil.createFile(fs, file3, blockSize/4, blockSize/4, blockSize, + (short) 1, seed); checkFile(fs, file3, 1); file3 = fs.makeQualified(file3); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java index c1f4a67f929..55d1aa7ee51 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java @@ -110,9 +110,7 @@ public void testReadSelectNonStaleDatanode() throws Exception { // do the writing but do not close the FSDataOutputStream // in order to mimic the ongoing writing final Path fileName = new Path("/file1"); - stm = fileSys.create( - fileName, - true, + stm = fileSys.create(fileName, true, fileSys.getConf().getInt( CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), (short) 3, blockSize); @@ -180,29 +178,15 @@ public void testGetBlocks() throws Exception { final short REPLICATION_FACTOR = (short) 2; final int DEFAULT_BLOCK_SIZE = 1024; - final Random r = new Random(); CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE); MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes( REPLICATION_FACTOR).build(); try { cluster.waitActive(); - - // create a file with two blocks - FileSystem fs = cluster.getFileSystem(); - FSDataOutputStream out = fs.create(new Path("/tmp.txt"), - REPLICATION_FACTOR); - byte[] data = new byte[1024]; long fileLen = 2 * DEFAULT_BLOCK_SIZE; - long bytesToWrite = fileLen; - while (bytesToWrite > 0) { - r.nextBytes(data); - int bytesToWriteNext = (1024 < bytesToWrite) ? 
1024
-            : (int) bytesToWrite;
-        out.write(data, 0, bytesToWriteNext);
-        bytesToWrite -= bytesToWriteNext;
-      }
-      out.close();
+      DFSTestUtil.createFile(cluster.getFileSystem(), new Path("/tmp.txt"),
+          fileLen, REPLICATION_FACTOR, 0L);
 
       // get blocks & data nodes
       List<LocatedBlock> locatedBlocks;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
index ab28ce27d6d..a42b0344418 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
@@ -52,22 +52,6 @@ public class TestInjectionForSimulatedStorage {
   private static final Log LOG = LogFactory.getLog(
       "org.apache.hadoop.hdfs.TestInjectionForSimulatedStorage");
-
-  private void writeFile(FileSystem fileSys, Path name, int repl)
-      throws IOException {
-    // create and write a file that contains three blocks of data
-    FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
-        .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
-        (short) repl, blockSize);
-    byte[] buffer = new byte[filesize];
-    for (int i=0; i<buffer.length; i++) {
-      buffer[i] = '1';
-    }
-    stm.write(buffer);
-    stm.close();
-  }
 
       Iterable<Block>[] blocksList = cluster.getAllBlockReports(bpid);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java
index e5ef1acfec6..3978444c6a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java
@@ -50,19 +50,6 @@ public class TestModTime {
   Random myrand = new Random();
   Path hostsFile;
   Path excludeFile;
-
-  private void writeFile(FileSystem fileSys, Path name, int repl)
-      throws IOException {
-    // create and write a file that contains three blocks of data
-    FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
-        .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
-        (short) repl, blockSize);
-    byte[] buffer = new byte[fileSize];
-    Random rand = new Random(seed);
-    rand.nextBytes(buffer);
-    stm.write(buffer);
-    stm.close();
-  }
 
   private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
     assertTrue(fileSys.exists(name));
@@ -105,7 +92,8 @@ public void testModTime() throws IOException {
     System.out.println("Creating testdir1 and testdir1/test1.dat.");
     Path dir1 = new Path("testdir1");
     Path file1 = new Path(dir1, "test1.dat");
-    writeFile(fileSys, file1, replicas);
+    DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+        (short) replicas, seed);
     FileStatus stat = fileSys.getFileStatus(file1);
     long mtime1 = stat.getModificationTime();
     assertTrue(mtime1 != 0);
@@ -120,7 +108,8 @@
     //
     System.out.println("Creating testdir1/test2.dat.");
     Path file2 = new Path(dir1, "test2.dat");
-    writeFile(fileSys, file2, replicas);
+    DFSTestUtil.createFile(fileSys, file2, fileSize, fileSize, blockSize,
+        (short) replicas, seed);
     stat = fileSys.getFileStatus(file2);
 
     //
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
index 55695415d88..1e0681f4711 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java @@ -41,11 +41,9 @@ public class TestPread { boolean simulatedStorage = false; private void writeFile(FileSystem fileSys, Path name) throws IOException { - // create and write a file that contains three blocks of data - DataOutputStream stm = fileSys.create(name, true, 4096, (short)1, - blockSize); // test empty file open and read - stm.close(); + DFSTestUtil.createFile(fileSys, name, 12 * blockSize, 0, + blockSize, (short) 1, seed); FSDataInputStream in = fileSys.open(name); byte[] buffer = new byte[12 * blockSize]; in.readFully(0, buffer, 0, 0); @@ -62,11 +60,8 @@ private void writeFile(FileSystem fileSys, Path name) throws IOException { assertTrue("Cannot delete file", false); // now create the real file - stm = fileSys.create(name, true, 4096, (short)1, blockSize); - Random rand = new Random(seed); - rand.nextBytes(buffer); - stm.write(buffer); - stm.close(); + DFSTestUtil.createFile(fileSys, name, 12 * blockSize, 12 * blockSize, + blockSize, (short) 1, seed); } private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java index 7ea963d4c7d..1ec395d350a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java @@ -26,15 +26,12 @@ import java.io.RandomAccessFile; import java.net.InetSocketAddress; import java.util.Iterator; -import java.util.Random; import java.util.concurrent.TimeoutException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -61,19 +58,6 @@ public class TestReplication { private static final int numDatanodes = racks.length; private static final Log LOG = LogFactory.getLog( "org.apache.hadoop.hdfs.TestReplication"); - - private void writeFile(FileSystem fileSys, Path name, int repl) - throws IOException { - // create and write a file that contains three blocks of data - FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf() - .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), - (short) repl, blockSize); - byte[] buffer = new byte[fileSize]; - Random rand = new Random(seed); - rand.nextBytes(buffer); - stm.write(buffer); - stm.close(); - } /* check if there are at least two nodes are on the same rack */ private void checkFile(FileSystem fileSys, Path name, int repl) @@ -222,19 +206,25 @@ public void runReplication(boolean simulated) throws IOException { FileSystem fileSys = cluster.getFileSystem(); try { Path file1 = new Path("/smallblocktest.dat"); - writeFile(fileSys, file1, 3); + //writeFile(fileSys, file1, 3); + DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, + (short) 3, seed); checkFile(fileSys, file1, 3); cleanupFile(fileSys, file1); - writeFile(fileSys, file1, 10); + DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, + (short) 
10, seed); checkFile(fileSys, file1, 10); cleanupFile(fileSys, file1); - writeFile(fileSys, file1, 4); + DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, + (short) 4, seed); checkFile(fileSys, file1, 4); cleanupFile(fileSys, file1); - writeFile(fileSys, file1, 1); + DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, + (short) 1, seed); checkFile(fileSys, file1, 1); cleanupFile(fileSys, file1); - writeFile(fileSys, file1, 2); + DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, + (short) 2, seed); checkFile(fileSys, file1, 2); cleanupFile(fileSys, file1); } finally { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java index c2f1cf39c3c..2ff1c291c1d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java @@ -40,16 +40,6 @@ public class TestSeekBug { static final long seed = 0xDEADBEEFL; static final int ONEMB = 1 << 20; - private void writeFile(FileSystem fileSys, Path name) throws IOException { - // create and write a file that contains 1MB - DataOutputStream stm = fileSys.create(name); - byte[] buffer = new byte[ONEMB]; - Random rand = new Random(seed); - rand.nextBytes(buffer); - stm.write(buffer); - stm.close(); - } - private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) { for (int idx = 0; idx < actual.length; idx++) { assertEquals(message+" byte "+(from+idx)+" differs. expected "+ @@ -132,7 +122,9 @@ public void testSeekBugDFS() throws IOException { FileSystem fileSys = cluster.getFileSystem(); try { Path file1 = new Path("seektest.dat"); - writeFile(fileSys, file1); + DFSTestUtil.createFile(fileSys, file1, ONEMB, ONEMB, + fileSys.getDefaultBlockSize(file1), + fileSys.getDefaultReplication(file1), seed); seekReadFile(fileSys, file1); smallReadSeek(fileSys, file1); cleanupFile(fileSys, file1); @@ -151,7 +143,9 @@ public void testSeekBugLocalFS() throws IOException { FileSystem fileSys = FileSystem.getLocal(conf); try { Path file1 = new Path("build/test/data", "seektest.dat"); - writeFile(fileSys, file1); + DFSTestUtil.createFile(fileSys, file1, ONEMB, ONEMB, + fileSys.getDefaultBlockSize(file1), + fileSys.getDefaultReplication(file1), seed); seekReadFile(fileSys, file1); cleanupFile(fileSys, file1); } finally { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java index 8cbb4fd3172..90f47e58333 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java @@ -42,18 +42,6 @@ public class TestSmallBlock { static final int blockSize = 1; static final int fileSize = 20; boolean simulatedStorage = false; - - private void writeFile(FileSystem fileSys, Path name) throws IOException { - // create and write a file that contains three blocks of data - FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf() - .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), - (short) 1, blockSize); - byte[] buffer = new byte[fileSize]; - Random rand = new Random(seed); - rand.nextBytes(buffer); - stm.write(buffer); - stm.close(); - } private void 
checkAndEraseData(byte[] actual, int from, byte[] expected, String message) { for (int idx = 0; idx < actual.length; idx++) { @@ -105,7 +93,8 @@ public void testSmallBlock() throws IOException { FileSystem fileSys = cluster.getFileSystem(); try { Path file1 = new Path("smallblocktest.dat"); - writeFile(fileSys, file1); + DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, + (short) 1, seed); checkFile(fileSys, file1); cleanupFile(fileSys, file1); } finally { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java index 13f871934ed..b81feacaeb6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java @@ -36,6 +36,7 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -64,6 +65,10 @@ public class TestBackupNode { } static final String BASE_DIR = MiniDFSCluster.getBaseDirectory(); + + static final long seed = 0xDEADBEEFL; + static final int blockSize = 4096; + static final int fileSize = 8192; @Before public void setUp() throws Exception { @@ -350,14 +355,17 @@ void testCheckpoint(StartupOption op) throws Exception { + NetUtils.getHostPortString(add)).toUri(), conf); boolean canWrite = true; try { - TestCheckpoint.writeFile(bnFS, file3, replication); + DFSTestUtil.createFile(bnFS, file3, fileSize, fileSize, blockSize, + replication, seed); } catch (IOException eio) { LOG.info("Write to BN failed as expected: ", eio); canWrite = false; } assertFalse("Write to BackupNode must be prohibited.", canWrite); - TestCheckpoint.writeFile(fileSys, file3, replication); + DFSTestUtil.createFile(fileSys, file3, fileSize, fileSize, blockSize, + replication, seed); + TestCheckpoint.checkFile(fileSys, file3, replication); // should also be on BN right away assertTrue("file3 does not exist on BackupNode", diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java index 58fbfec31dc..826564e63f5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java @@ -28,7 +28,6 @@ import static org.junit.Assert.fail; import java.io.File; -import java.io.FilenameFilter; import java.io.IOException; import java.lang.management.ManagementFactory; import java.net.InetSocketAddress; @@ -36,14 +35,12 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; -import java.util.Random; import org.apache.commons.cli.ParseException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataOutputStream; import 
org.apache.hadoop.fs.FileContext; @@ -51,6 +48,7 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; @@ -116,19 +114,6 @@ public void setUp() throws IOException { faultInjector = Mockito.mock(CheckpointFaultInjector.class); CheckpointFaultInjector.instance = faultInjector; } - - static void writeFile(FileSystem fileSys, Path name, int repl) - throws IOException { - FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf() - .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), - (short) repl, blockSize); - byte[] buffer = new byte[TestCheckpoint.fileSize]; - Random rand = new Random(TestCheckpoint.seed); - rand.nextBytes(buffer); - stm.write(buffer); - stm.close(); - } - static void checkFile(FileSystem fileSys, Path name, int repl) throws IOException { @@ -259,7 +244,8 @@ public void testSecondaryNamenodeError1() // // Create a new file // - writeFile(fileSys, file1, replication); + DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, + replication, seed); checkFile(fileSys, file1, replication); } finally { fileSys.close(); @@ -323,7 +309,8 @@ public void testSecondaryNamenodeError2() throws IOException { // // Create a new file // - writeFile(fileSys, file1, replication); + DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, + replication, seed); checkFile(fileSys, file1, replication); } finally { fileSys.close(); @@ -394,7 +381,8 @@ public void testSecondaryNamenodeError3() throws IOException { // // Create a new file // - writeFile(fileSys, file1, replication); + DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, + replication, seed); checkFile(fileSys, file1, replication); } finally { fileSys.close(); @@ -580,7 +568,8 @@ private void doSendFailTest(String exceptionSubstring) // // Create a new file // - writeFile(fileSys, file1, replication); + DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, + replication, seed); checkFile(fileSys, file1, replication); } finally { fileSys.close(); @@ -906,7 +895,8 @@ public void testCheckpoint() throws IOException { // // Create file1 // - writeFile(fileSys, file1, replication); + DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, + replication, seed); checkFile(fileSys, file1, replication); // @@ -933,7 +923,8 @@ public void testCheckpoint() throws IOException { cleanupFile(fileSys, file1); // create new file file2 - writeFile(fileSys, file2, replication); + DFSTestUtil.createFile(fileSys, file2, fileSize, fileSize, blockSize, + replication, seed); checkFile(fileSys, file2, replication); // @@ -999,7 +990,8 @@ public void testSaveNamespace() throws IOException { } // create new file Path file = new Path("namespace.dat"); - writeFile(fs, file, replication); + DFSTestUtil.createFile(fs, file, fileSize, fileSize, blockSize, + replication, seed); checkFile(fs, file, replication); // create new link diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java index 0138070cd25..6483386fb58 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java @@ -34,6 +34,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -116,19 +117,6 @@ private static void writeConfigFile(FileSystem fs, Path name, stm.close(); } - private void writeFile(FileSystem fileSys, Path name, short repl) - throws IOException { - // create and write a file that contains three blocks of data - FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf() - .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), repl, - blockSize); - byte[] buffer = new byte[fileSize]; - Random rand = new Random(seed); - rand.nextBytes(buffer); - stm.write(buffer); - stm.close(); - } - private FSDataOutputStream writeIncompleteFile(FileSystem fileSys, Path name, short repl) throws IOException { // create and write a file that contains three blocks of data @@ -198,7 +186,8 @@ public void testDecommissionStatus() throws IOException, InterruptedException { // Decommission one node. Verify the decommission status // Path file1 = new Path("decommission.dat"); - writeFile(fileSys, file1, replicas); + DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, + replicas, seed); Path file2 = new Path("decommission1.dat"); FSDataOutputStream st1 = writeIncompleteFile(fileSys, file2, replicas); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java index 7fd6f47ad5e..ca51d63b1a7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java @@ -20,14 +20,12 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; -import java.util.Random; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; @@ -43,19 +41,6 @@ public class TestFileLimit { static final int blockSize = 8192; boolean simulatedStorage = false; - // creates a zero file. 
- private void createFile(FileSystem fileSys, Path name) - throws IOException { - FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf() - .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), - (short) 1, blockSize); - byte[] buffer = new byte[1024]; - Random rand = new Random(seed); - rand.nextBytes(buffer); - stm.write(buffer); - stm.close(); - } - private void waitForLimit(FSNamesystem namesys, long num) { // wait for number of blocks to decrease @@ -106,7 +91,7 @@ public void testFileLimit() throws IOException { // for (int i = 0; i < maxObjects/2; i++) { Path file = new Path("/filestatus" + i); - createFile(fs, file); + DFSTestUtil.createFile(fs, file, 1024, 1024, blockSize, (short) 1, seed); System.out.println("Created file " + file); currentNodes += 2; // two more objects for this creation. } @@ -115,7 +100,7 @@ public void testFileLimit() throws IOException { boolean hitException = false; try { Path file = new Path("/filestatus"); - createFile(fs, file); + DFSTestUtil.createFile(fs, file, 1024, 1024, blockSize, (short) 1, seed); System.out.println("Created file " + file); } catch (IOException e) { hitException = true; @@ -132,7 +117,7 @@ public void testFileLimit() throws IOException { waitForLimit(namesys, currentNodes); // now, we shud be able to create a new file - createFile(fs, file0); + DFSTestUtil.createFile(fs, file0, 1024, 1024, blockSize, (short) 1, seed); System.out.println("Created file " + file0 + " again."); currentNodes += 2; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java index 2f2b6888fca..9befc49f014 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java @@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.junit.AfterClass; @@ -48,17 +49,6 @@ public class TestMetaSave { private static MiniDFSCluster cluster = null; private static FileSystem fileSys = null; - private void createFile(FileSystem fileSys, Path name) throws IOException { - FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf() - .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), - (short) 2, blockSize); - byte[] buffer = new byte[1024]; - Random rand = new Random(seed); - rand.nextBytes(buffer); - stm.write(buffer); - stm.close(); - } - @BeforeClass public static void setUp() throws IOException { // start a cluster @@ -84,7 +74,8 @@ public void testMetaSave() throws IOException, InterruptedException { for (int i = 0; i < 2; i++) { Path file = new Path("/filestatus" + i); - createFile(fileSys, file); + DFSTestUtil.createFile(fileSys, file, 1024, 1024, blockSize, (short) 2, + seed); } cluster.stopDataNode(1); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java index 70f5b577851..63388be3207 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java @@ -25,17 +25,15 @@ import java.io.File; import java.io.IOException; import java.util.List; -import java.util.Random; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; @@ -71,18 +69,6 @@ public void setUp() throws IOException { } } - private void writeFile(FileSystem fileSys, Path name, int repl) - throws IOException { - FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf() - .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), - (short) repl, BLOCK_SIZE); - byte[] buffer = new byte[FILE_SIZE]; - Random rand = new Random(SEED); - rand.nextBytes(buffer); - stm.write(buffer); - stm.close(); - } - void checkImageAndEditsFilesExistence(File dir, boolean shouldHaveImages, boolean shouldHaveEdits) @@ -187,7 +173,8 @@ public void testNameEditsConfigs() throws Exception { try { assertTrue(!fileSys.exists(file1)); - writeFile(fileSys, file1, replication); + DFSTestUtil.createFile(fileSys, file1, FILE_SIZE, FILE_SIZE, BLOCK_SIZE, + (short) replication, SEED); checkFile(fileSys, file1, replication); secondary.doCheckpoint(); } finally { @@ -224,7 +211,8 @@ public void testNameEditsConfigs() throws Exception { assertTrue(fileSys.exists(file1)); checkFile(fileSys, file1, replication); cleanupFile(fileSys, file1); - writeFile(fileSys, file2, replication); + DFSTestUtil.createFile(fileSys, file2, FILE_SIZE, FILE_SIZE, BLOCK_SIZE, + (short) replication, SEED); checkFile(fileSys, file2, replication); secondary.doCheckpoint(); } finally { @@ -260,7 +248,8 @@ public void testNameEditsConfigs() throws Exception { assertTrue(fileSys.exists(file2)); checkFile(fileSys, file2, replication); cleanupFile(fileSys, file2); - writeFile(fileSys, file3, replication); + DFSTestUtil.createFile(fileSys, file3, FILE_SIZE, FILE_SIZE, BLOCK_SIZE, + (short) replication, SEED); checkFile(fileSys, file3, replication); secondary.doCheckpoint(); } finally { @@ -364,7 +353,8 @@ public void testNameEditsConfigsFailure() throws IOException { fileSys = cluster.getFileSystem(); assertTrue(!fileSys.exists(file1)); - writeFile(fileSys, file1, replication); + DFSTestUtil.createFile(fileSys, file1, FILE_SIZE, FILE_SIZE, BLOCK_SIZE, + (short) replication, SEED); checkFile(fileSys, file1, replication); } finally { fileSys.close(); @@ -402,7 +392,8 @@ public void testNameEditsConfigsFailure() throws IOException { assertTrue(fileSys.exists(file1)); checkFile(fileSys, file1, replication); cleanupFile(fileSys, file1); - writeFile(fileSys, file2, replication); + DFSTestUtil.createFile(fileSys, file2, FILE_SIZE, FILE_SIZE, BLOCK_SIZE, + (short) replication, SEED); checkFile(fileSys, file2, replication); } finally { fileSys.close(); @@ -429,7 +420,8 @@ public void testNameEditsConfigsFailure() throws IOException { assertTrue(fileSys.exists(file2)); 
checkFile(fileSys, file2, replication); cleanupFile(fileSys, file2); - writeFile(fileSys, file3, replication); + DFSTestUtil.createFile(fileSys, file3, FILE_SIZE, FILE_SIZE, BLOCK_SIZE, + (short) replication, SEED); checkFile(fileSys, file3, replication); } finally { fileSys.close(); @@ -483,7 +475,8 @@ public void testNameEditsConfigsFailure() throws IOException { assertTrue(fileSys.exists(file3)); checkFile(fileSys, file3, replication); cleanupFile(fileSys, file3); - writeFile(fileSys, file3, replication); + DFSTestUtil.createFile(fileSys, file3, FILE_SIZE, FILE_SIZE, BLOCK_SIZE, + (short) replication, SEED); checkFile(fileSys, file3, replication); } finally { fileSys.close(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java index 28e22aa4be7..45a4f60973b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java @@ -77,20 +77,6 @@ public class TestStartup { static final int fileSize = 8192; private long editsLength=0, fsimageLength=0; - - private void writeFile(FileSystem fileSys, Path name, int repl) - throws IOException { - FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf() - .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), - (short) repl, blockSize); - byte[] buffer = new byte[fileSize]; - Random rand = new Random(seed); - rand.nextBytes(buffer); - stm.write(buffer); - stm.close(); - } - - @Before public void setUp() throws Exception { config = new HdfsConfiguration(); @@ -150,7 +136,8 @@ public void createCheckPoint() throws IOException { // create a file FileSystem fileSys = cluster.getFileSystem(); Path file1 = new Path("t1"); - this.writeFile(fileSys, file1, 1); + DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, + (short) 1, seed); LOG.info("--doing checkpoint"); sn.doCheckpoint(); // this shouldn't fail diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java index 6901f6439e7..92a1530bf75 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java @@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.tools.JMXGet; @@ -53,19 +54,6 @@ public class TestJMXGet { static final int blockSize = 4096; static final int fileSize = 8192; - private void writeFile(FileSystem fileSys, Path name, int repl) - throws IOException { - FSDataOutputStream stm = fileSys.create(name, true, - fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), - (short)repl, blockSize); - byte[] buffer = new byte[fileSize]; - Random rand = new Random(seed); - rand.nextBytes(buffer); - stm.write(buffer); - stm.close(); - } - - @Before public void setUp() throws Exception { config = new HdfsConfiguration(); @@ -96,7 +84,8 @@ public void testNameNode() throws Exception { cluster = new 
MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).build(); cluster.waitActive(); - writeFile(cluster.getFileSystem(), new Path("/test1"), 2); + DFSTestUtil.createFile(cluster.getFileSystem(), new Path("/test1"), + fileSize, fileSize, blockSize, (short) 2, seed); JMXGet jmx = new JMXGet(); //jmx.setService("*"); // list all hadoop services @@ -125,7 +114,8 @@ public void testDataNode() throws Exception { cluster = new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).build(); cluster.waitActive(); - writeFile(cluster.getFileSystem(), new Path("/test"), 2); + DFSTestUtil.createFile(cluster.getFileSystem(), new Path("/test"), + fileSize, fileSize, blockSize, (short) 2, seed); JMXGet jmx = new JMXGet(); //jmx.setService("*"); // list all hadoop services
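
For reference, every test touched above converges on the new seven-argument DFSTestUtil.createFile() overload added at the top of this patch. The sketch below shows the calling convention only; the class name CreateFileUsageSketch, the constants, and the two-datanode mini cluster are illustrative assumptions, not part of the patch. The argument order, DFSTestUtil.createFile(fs, path, bufferLen, fileLen, blockSize, replication, seed), is taken directly from the calls introduced above.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class CreateFileUsageSketch {
  // Values mirror the constants several of the tests in this patch use.
  static final long SEED = 0xDEADBEEFL;
  static final int BLOCK_SIZE = 4096;
  static final int FILE_SIZE = 8192;

  public static void main(String[] args) throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      // New overload: bufferLen, fileLen, blockSize, replication, seed.
      // Here bufferLen == fileLen, matching how the migrated tests call it;
      // the file content is pseudo-random data derived from SEED.
      DFSTestUtil.createFile(fs, new Path("/example.dat"), FILE_SIZE, FILE_SIZE,
          BLOCK_SIZE, (short) 2, SEED);
    } finally {
      cluster.shutdown();
    }
  }
}

The older five-argument overload is kept and simply delegates to this one with a 1024-byte buffer and the file system's default block size, so existing callers keep their behavior.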