diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index 7c9df299e9c..e643d210057 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -1030,6 +1030,7 @@ SecondaryNameNode startSecondaryNameNode(Configuration conf, int index)
    */
   @Test
   public void testCheckpoint() throws IOException {
+    Path tmpDir = new Path("/tmp_tmp");
     Path file1 = new Path("checkpoint.dat");
     Path file2 = new Path("checkpoint2.dat");
     Configuration conf = new HdfsConfiguration();
@@ -1057,6 +1058,11 @@ public void testCheckpoint() throws IOException {
         replication, seed);
     checkFile(fileSys, file1, replication);
 
+    for(int i=0; i < 1000; i++) {
+      fileSys.mkdirs(tmpDir);
+      fileSys.delete(tmpDir, true);
+    }
+
     //
     // Take a checkpoint
     //
@@ -1081,7 +1087,6 @@ public void testCheckpoint() throws IOException {
     //
     // Restart cluster and verify that file1 still exist.
     //
-    Path tmpDir = new Path("/tmp_tmp");
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
           .format(false).build();