diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0707ffa1678..f17a9dea19a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -47,6 +47,9 @@ Release 2.0.5-beta - UNRELEASED
     HDFS-4679. Namenode operation checks should be done in a consistent
     manner. (suresh)
 
+    HDFS-4693. Some test cases in TestCheckpoint do not clean up after
+    themselves. (Arpit Agarwal, suresh via suresh)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index 16c12608909..ce6761cccf1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -290,6 +290,7 @@ public class SecondaryNameNode implements Runnable {
       try {
         infoServer.join();
       } catch (InterruptedException ie) {
+        LOG.debug("Exception ", ie);
       }
     }
 
@@ -309,15 +310,25 @@
       }
     }
     try {
-      if (infoServer != null) infoServer.stop();
+      if (infoServer != null) {
+        infoServer.stop();
+        infoServer = null;
+      }
     } catch (Exception e) {
       LOG.warn("Exception shutting down SecondaryNameNode", e);
     }
     try {
-      if (checkpointImage != null) checkpointImage.close();
+      if (checkpointImage != null) {
+        checkpointImage.close();
+        checkpointImage = null;
+      }
     } catch(IOException e) {
       LOG.warn("Exception while closing CheckpointStorage", e);
     }
+    if (namesystem != null) {
+      namesystem.shutdown();
+      namesystem = null;
+    }
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
index f418724c767..1a676897a16 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
@@ -109,6 +109,7 @@ public class UpgradeUtilities {
     config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, namenodeStorage.toString());
     config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, datanodeStorage.toString());
     MiniDFSCluster cluster = null;
+    String bpid = null;
     try {
       // format data-node
       createEmptyDirs(new String[] {datanodeStorage.toString()});
@@ -149,6 +150,7 @@
       // write more files
       writeFile(fs, new Path(baseDir, "file3"), buffer, bufferSize);
       writeFile(fs, new Path(baseDir, "file4"), buffer, bufferSize);
+      bpid = cluster.getNamesystem(0).getBlockPoolId();
     } finally {
       // shutdown
       if (cluster != null) cluster.shutdown();
@@ -160,7 +162,6 @@
     File dnCurDir = new File(datanodeStorage, "current");
     datanodeStorageChecksum = checksumContents(DATA_NODE, dnCurDir);
 
-    String bpid = cluster.getNamesystem(0).getBlockPoolId();
     File bpCurDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
         "current");
     blockPoolStorageChecksum = checksumContents(DATA_NODE, bpCurDir);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index 03aec098d3a..9d12367f63c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -112,9 +112,8 @@ public class TestCheckpoint {
   private CheckpointFaultInjector faultInjector;
 
   @Before
-  public void setUp() throws IOException {
+  public void setUp() {
     FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
-
     faultInjector = Mockito.mock(CheckpointFaultInjector.class);
     CheckpointFaultInjector.instance = faultInjector;
   }
@@ -158,9 +157,8 @@
   public void testNameDirError() throws IOException {
     LOG.info("Starting testNameDirError");
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(0)
-        .build();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+        .build();
 
     Collection<URI> nameDirs = cluster.getNameDirs(0);
     cluster.shutdown();
@@ -172,20 +170,15 @@
       try {
         // Simulate the mount going read-only
         dir.setWritable(false);
-        cluster = new MiniDFSCluster.Builder(conf)
-            .numDataNodes(0)
-            .format(false)
-            .build();
+        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+            .format(false).build();
         fail("NN should have failed to start with " + dir + " set unreadable");
       } catch (IOException ioe) {
         GenericTestUtils.assertExceptionContains(
-            "storage directory does not exist or is not accessible",
-            ioe);
+            "storage directory does not exist or is not accessible", ioe);
       } finally {
-        if (cluster != null) {
-          cluster.shutdown();
-          cluster = null;
-        }
+        cleanup(cluster);
+        cluster = null;
         dir.setWritable(true);
       }
     }
@@ -271,15 +264,13 @@
       assertTrue("Another checkpoint should have reloaded image",
           secondary.doCheckpoint());
     } finally {
-      if (secondary != null) {
-        secondary.shutdown();
-      }
       if (fs != null) {
         fs.close();
       }
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+      cleanup(secondary);
+      secondary = null;
+      cleanup(cluster);
+      cluster = null;
       Mockito.reset(faultInjector);
     }
   }
@@ -320,15 +311,13 @@
       ExitUtil.resetFirstExitException();
       assertEquals("Max retries", 1, secondary.getMergeErrorCount() - 1);
     } finally {
-      if (secondary != null) {
-        secondary.shutdown();
-      }
       if (fs != null) {
         fs.close();
       }
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+      cleanup(secondary);
+      secondary = null;
+      cleanup(cluster);
+      cluster = null;
       Mockito.reset(faultInjector);
     }
   }
@@ -342,17 +331,18 @@
     LOG.info("Starting testSecondaryNamenodeError1");
     Configuration conf = new HdfsConfiguration();
     Path file1 = new Path("checkpointxx.dat");
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(numDatanodes)
-        .build();
-    cluster.waitActive();
-    FileSystem fileSys = cluster.getFileSystem();
+    MiniDFSCluster cluster = null;
+    FileSystem fileSys = null;
+    SecondaryNameNode secondary = null;
     try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+          .build();
+      cluster.waitActive();
+      fileSys = cluster.getFileSystem();
       assertTrue(!fileSys.exists(file1));
-      //
+      // Make the checkpoint fail after rolling the edits log.
-      //
-      SecondaryNameNode secondary = startSecondaryNameNode(conf);
+      secondary = startSecondaryNameNode(conf);
 
       Mockito.doThrow(new IOException(
           "Injecting failure after rolling edit logs"))
@@ -362,10 +352,10 @@
         secondary.doCheckpoint();  // this should fail
         assertTrue(false);
       } catch (IOException e) {
+        // expected
       }
 
       Mockito.reset(faultInjector);
-      secondary.shutdown();
 
       //
       // Create a new file
@@ -374,7 +364,10 @@
       checkFile(fileSys, file1, replication);
     } finally {
       fileSys.close();
-      cluster.shutdown();
+      cleanup(secondary);
+      secondary = null;
+      cleanup(cluster);
+      cluster = null;
     }
 
     //
@@ -382,20 +375,22 @@
     // Then take another checkpoint to verify that the
    // namenode restart accounted for the rolled edit logs.
     //
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
-        .format(false).build();
-    cluster.waitActive();
-
-    fileSys = cluster.getFileSystem();
     try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+          .format(false).build();
+      cluster.waitActive();
+      fileSys = cluster.getFileSystem();
       checkFile(fileSys, file1, replication);
       cleanupFile(fileSys, file1);
-      SecondaryNameNode secondary = startSecondaryNameNode(conf);
+      secondary = startSecondaryNameNode(conf);
       secondary.doCheckpoint();
       secondary.shutdown();
     } finally {
       fileSys.close();
-      cluster.shutdown();
+      cleanup(secondary);
+      secondary = null;
+      cleanup(cluster);
+      cluster = null;
     }
   }
@@ -407,17 +402,19 @@
     LOG.info("Starting testSecondaryNamenodeError2");
     Configuration conf = new HdfsConfiguration();
     Path file1 = new Path("checkpointyy.dat");
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(numDatanodes)
-        .build();
-    cluster.waitActive();
-    FileSystem fileSys = cluster.getFileSystem();
+    MiniDFSCluster cluster = null;
+    FileSystem fileSys = null;
+    SecondaryNameNode secondary = null;
     try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+          .build();
+      cluster.waitActive();
+      fileSys = cluster.getFileSystem();
       assertTrue(!fileSys.exists(file1));
       //
       // Make the checkpoint fail after uploading the new fsimage.
       //
-      SecondaryNameNode secondary = startSecondaryNameNode(conf);
+      secondary = startSecondaryNameNode(conf);
 
       Mockito.doThrow(new IOException(
           "Injecting failure after uploading new image"))
@@ -427,9 +424,9 @@
         secondary.doCheckpoint();  // this should fail
         assertTrue(false);
       } catch (IOException e) {
+        // expected
       }
       Mockito.reset(faultInjector);
-      secondary.shutdown();
 
       //
       // Create a new file
       //
       writeFile(fileSys, file1, replication);
       checkFile(fileSys, file1, replication);
     } finally {
       fileSys.close();
-      cluster.shutdown();
+      cleanup(secondary);
+      secondary = null;
+      cleanup(cluster);
+      cluster = null;
     }
 
     //
@@ -446,18 +446,22 @@
     // Then take another checkpoint to verify that the
     // namenode restart accounted for the rolled edit logs.
     //
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
-    cluster.waitActive();
-    fileSys = cluster.getFileSystem();
     try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+          .format(false).build();
+      cluster.waitActive();
+      fileSys = cluster.getFileSystem();
       checkFile(fileSys, file1, replication);
       cleanupFile(fileSys, file1);
-      SecondaryNameNode secondary = startSecondaryNameNode(conf);
+      secondary = startSecondaryNameNode(conf);
       secondary.doCheckpoint();
       secondary.shutdown();
     } finally {
       fileSys.close();
-      cluster.shutdown();
+      cleanup(secondary);
+      secondary = null;
+      cleanup(cluster);
+      cluster = null;
     }
   }
@@ -469,18 +473,19 @@
     LOG.info("Starting testSecondaryNamenodeError3");
     Configuration conf = new HdfsConfiguration();
     Path file1 = new Path("checkpointzz.dat");
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(numDatanodes)
-        .build();
-
-    cluster.waitActive();
-    FileSystem fileSys = cluster.getFileSystem();
+    MiniDFSCluster cluster = null;
+    FileSystem fileSys = null;
+    SecondaryNameNode secondary = null;
    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+          .build();
+      cluster.waitActive();
+      fileSys = cluster.getFileSystem();
       assertTrue(!fileSys.exists(file1));
       //
       // Make the checkpoint fail after rolling the edit log.
       //
-      SecondaryNameNode secondary = startSecondaryNameNode(conf);
+      secondary = startSecondaryNameNode(conf);
 
       Mockito.doThrow(new IOException(
           "Injecting failure after rolling edit logs"))
@@ -490,6 +495,7 @@
         secondary.doCheckpoint();  // this should fail
         assertTrue(false);
       } catch (IOException e) {
+        // expected
       }
       Mockito.reset(faultInjector);
       secondary.shutdown(); // secondary namenode crash!
@@ -500,7 +506,6 @@
       //
       secondary = startSecondaryNameNode(conf);
       secondary.doCheckpoint();  // this should work correctly
-      secondary.shutdown();
 
       //
       // Create a new file
@@ -509,7 +514,10 @@
       checkFile(fileSys, file1, replication);
     } finally {
       fileSys.close();
-      cluster.shutdown();
+      cleanup(secondary);
+      secondary = null;
+      cleanup(cluster);
+      cluster = null;
     }
 
     //
@@ -517,18 +525,22 @@
     // Then take another checkpoint to verify that the
     // namenode restart accounted for the twice-rolled edit logs.
     //
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
-    cluster.waitActive();
-    fileSys = cluster.getFileSystem();
     try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+          .format(false).build();
+      cluster.waitActive();
+      fileSys = cluster.getFileSystem();
       checkFile(fileSys, file1, replication);
       cleanupFile(fileSys, file1);
-      SecondaryNameNode secondary = startSecondaryNameNode(conf);
+      secondary = startSecondaryNameNode(conf);
       secondary.doCheckpoint();
       secondary.shutdown();
     } finally {
       fileSys.close();
-      cluster.shutdown();
+      cleanup(secondary);
+      secondary = null;
+      cleanup(cluster);
+      cluster = null;
     }
   }
@@ -565,13 +577,16 @@
     LOG.info("Starting testSecondaryFailsToReturnImage");
     Configuration conf = new HdfsConfiguration();
     Path file1 = new Path("checkpointRI.dat");
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(numDatanodes)
-        .build();
-    cluster.waitActive();
-    FileSystem fileSys = cluster.getFileSystem();
-    FSImage image = cluster.getNameNode().getFSImage();
+    MiniDFSCluster cluster = null;
+    FileSystem fileSys = null;
+    FSImage image = null;
+    SecondaryNameNode secondary = null;
    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+          .build();
+      cluster.waitActive();
+      fileSys = cluster.getFileSystem();
+      image = cluster.getNameNode().getFSImage();
       assertTrue(!fileSys.exists(file1));
       StorageDirectory sd = image.getStorage().getStorageDir(0);
 
@@ -580,7 +595,7 @@
       //
       // Make the checkpoint
       //
-      SecondaryNameNode secondary = startSecondaryNameNode(conf);
+      secondary = startSecondaryNameNode(conf);
 
       try {
         secondary.doCheckpoint();  // this should fail
@@ -601,10 +616,12 @@
         assertEquals(fsimageLength, len);
       }
 
-      secondary.shutdown();
     } finally {
       fileSys.close();
-      cluster.shutdown();
+      cleanup(secondary);
+      secondary = null;
+      cleanup(cluster);
+      cluster = null;
     }
   }
@@ -658,17 +675,19 @@
       throws IOException {
     Configuration conf = new HdfsConfiguration();
     Path file1 = new Path("checkpoint-doSendFailTest-doSendFailTest.dat");
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(numDatanodes)
-        .build();
-    cluster.waitActive();
-    FileSystem fileSys = cluster.getFileSystem();
+    MiniDFSCluster cluster = null;
+    FileSystem fileSys = null;
+    SecondaryNameNode secondary = null;
     try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+          .build();
+      cluster.waitActive();
+      fileSys = cluster.getFileSystem();
       assertTrue(!fileSys.exists(file1));
       //
       // Make the checkpoint fail after rolling the edit log.
       //
-      SecondaryNameNode secondary = startSecondaryNameNode(conf);
+      secondary = startSecondaryNameNode(conf);
 
       try {
         secondary.doCheckpoint();  // this should fail
@@ -679,6 +698,7 @@
       }
       Mockito.reset(faultInjector);
       secondary.shutdown(); // secondary namenode crash!
+      secondary = null;
 
       // start new instance of secondary and verify that
       // a new rollEditLog succedes in spite of the fact that we had
@@ -686,7 +706,6 @@
       //
       secondary = startSecondaryNameNode(conf);
       secondary.doCheckpoint();  // this should work correctly
-      secondary.shutdown();
 
       //
       // Create a new file
@@ -695,7 +714,10 @@
       checkFile(fileSys, file1, replication);
     } finally {
       fileSys.close();
-      cluster.shutdown();
+      cleanup(secondary);
+      secondary = null;
+      cleanup(cluster);
+      cluster = null;
     }
   }
@@ -706,21 +728,21 @@
   @Test
   public void testNameDirLocking() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(0)
-        .build();
+    MiniDFSCluster cluster = null;
 
     // Start a NN, and verify that lock() fails in all of the configured
     // directories
     StorageDirectory savedSd = null;
     try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
       for (StorageDirectory sd : storage.dirIterable(null)) {
         assertLockFails(sd);
         savedSd = sd;
       }
     } finally {
-      cluster.shutdown();
+      cleanup(cluster);
+      cluster = null;
     }
 
     assertNotNull(savedSd);
@@ -741,15 +763,14 @@
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
         editsDir.getAbsolutePath());
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .manageNameDfsDirs(false)
-        .numDataNodes(0)
-        .build();
+    MiniDFSCluster cluster = null;
 
     // Start a NN, and verify that lock() fails in all of the configured
     // directories
     StorageDirectory savedSd = null;
     try {
+      cluster = new MiniDFSCluster.Builder(conf).manageNameDfsDirs(false)
+          .numDataNodes(0).build();
       NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
       for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) {
         assertEquals(editsDir.getAbsoluteFile(), sd.getRoot());
@@ -757,7 +778,8 @@
         savedSd = sd;
       }
     } finally {
-      cluster.shutdown();
+      cleanup(cluster);
+      cluster = null;
     }
 
     assertNotNull(savedSd);
@@ -773,12 +795,10 @@
   public void testSecondaryNameNodeLocking() throws Exception {
     // Start a primary NN so that the secondary will start successfully
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(0)
-        .build();
-
+    MiniDFSCluster cluster = null;
     SecondaryNameNode secondary = null;
     try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       StorageDirectory savedSd = null;
       // Start a secondary NN, then make sure that all of its storage
       // dirs got locked.
@@ -808,10 +828,10 @@
      }
 
     } finally {
-      if (secondary != null) {
-        secondary.shutdown();
-      }
-      cluster.shutdown();
+      cleanup(secondary);
+      secondary = null;
+      cleanup(cluster);
+      cluster = null;
     }
   }
@@ -822,12 +842,10 @@
   @Test
   public void testStorageAlreadyLockedErrorMessage() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(0)
-        .build();
-
+    MiniDFSCluster cluster = null;
     StorageDirectory savedSd = null;
     try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
       for (StorageDirectory sd : storage.dirIterable(null)) {
         assertLockFails(sd);
@@ -845,7 +863,8 @@
             + "'", logs.getOutput().contains(jvmName));
       }
     } finally {
-      cluster.shutdown();
+      cleanup(cluster);
+      cluster = null;
     }
   }
@@ -873,18 +892,17 @@
       Configuration conf, StorageDirectory sdToLock) throws IOException {
     // Lock the edits dir, then start the NN, and make sure it fails to start
     sdToLock.lock();
+    MiniDFSCluster cluster = null;
     try {
-      MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-          .format(false)
-          .manageNameDfsDirs(false)
-          .numDataNodes(0)
-          .build();
+      cluster = new MiniDFSCluster.Builder(conf).format(false)
+          .manageNameDfsDirs(false).numDataNodes(0).build();
       assertFalse("cluster should fail to start after locking " +
           sdToLock, sdToLock.isLockSupported());
-      cluster.shutdown();
     } catch (IOException ioe) {
       GenericTestUtils.assertExceptionContains("already locked", ioe);
     } finally {
+      cleanup(cluster);
+      cluster = null;
       sdToLock.unlock();
     }
   }
@@ -901,11 +919,12 @@
     Configuration conf = new HdfsConfiguration();
     Path testPath = new Path("/testfile");
     SecondaryNameNode snn = null;
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(0)
-        .build();
-    Collection<URI> nameDirs = cluster.getNameDirs(0);
+    MiniDFSCluster cluster = null;
+    Collection<URI> nameDirs = null;
     try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      nameDirs = cluster.getNameDirs(0);
+
       // Make an entry in the namespace, used for verifying checkpoint
       // later.
       cluster.getFileSystem().mkdirs(testPath);
@@ -914,21 +933,16 @@
       snn = startSecondaryNameNode(conf);
       snn.doCheckpoint();
     } finally {
-      if (snn != null) {
-        snn.shutdown();
-      }
-      cluster.shutdown();
+      cleanup(snn);
+      cleanup(cluster);
       cluster = null;
     }
 
     LOG.info("Trying to import checkpoint when the NameNode already " +
         "contains an image. This should fail.");
     try {
-      cluster = new MiniDFSCluster.Builder(conf)
-          .numDataNodes(0)
-          .format(false)
-          .startupOption(StartupOption.IMPORT)
-          .build();
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false)
+          .startupOption(StartupOption.IMPORT).build();
       fail("NameNode did not fail to start when it already contained " +
           "an image");
     } catch (IOException ioe) {
@@ -936,10 +950,8 @@
       GenericTestUtils.assertExceptionContains(
           "NameNode already contains an image", ioe);
     } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-        cluster = null;
-      }
+      cleanup(cluster);
+      cluster = null;
     }
 
     LOG.info("Removing NN storage contents");
@@ -951,11 +963,8 @@
 
     LOG.info("Trying to import checkpoint");
     try {
-      cluster = new MiniDFSCluster.Builder(conf)
-          .format(false)
-          .numDataNodes(0)
-          .startupOption(StartupOption.IMPORT)
-          .build();
+      cluster = new MiniDFSCluster.Builder(conf).format(false).numDataNodes(0)
+          .startupOption(StartupOption.IMPORT).build();
 
       assertTrue("Path from checkpoint should exist after import",
           cluster.getFileSystem().exists(testPath));
@@ -963,9 +972,8 @@
       // Make sure that the image got saved on import
       FSImageTestUtil.assertNNHasCheckpoints(cluster, Ints.asList(3));
     } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+      cleanup(cluster);
+      cluster = null;
     }
   }
@@ -1002,12 +1010,15 @@
     Configuration conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
     replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(numDatanodes).build();
-    cluster.waitActive();
-    FileSystem fileSys = cluster.getFileSystem();
-
+
+    MiniDFSCluster cluster = null;
+    FileSystem fileSys = null;
+    SecondaryNameNode secondary = null;
     try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
+          numDatanodes).build();
+      cluster.waitActive();
+      fileSys = cluster.getFileSystem();
       //
       // verify that 'format' really blew away all pre-existing files
       //
@@ -1023,22 +1034,26 @@
       //
       // Take a checkpoint
       //
-      SecondaryNameNode secondary = startSecondaryNameNode(conf);
+      secondary = startSecondaryNameNode(conf);
       secondary.doCheckpoint();
-      secondary.shutdown();
     } finally {
       fileSys.close();
-      cluster.shutdown();
+      cleanup(secondary);
+      secondary = null;
+      cleanup(cluster);
+      cluster = null;
     }
 
     //
     // Restart cluster and verify that file1 still exist.
     //
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
-    cluster.waitActive();
-    fileSys = cluster.getFileSystem();
     Path tmpDir = new Path("/tmp_tmp");
     try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+          .format(false).build();
+      cluster.waitActive();
+      fileSys = cluster.getFileSystem();
+
       // check that file1 still exists
       checkFile(fileSys, file1, replication);
       cleanupFile(fileSys, file1);
@@ -1050,17 +1065,18 @@
       //
       // Take a checkpoint
       //
-      SecondaryNameNode secondary = startSecondaryNameNode(conf);
+      secondary = startSecondaryNameNode(conf);
       secondary.doCheckpoint();
 
       fileSys.delete(tmpDir, true);
       fileSys.mkdirs(tmpDir);
       secondary.doCheckpoint();
-
-      secondary.shutdown();
     } finally {
       fileSys.close();
-      cluster.shutdown();
+      cleanup(secondary);
+      secondary = null;
+      cleanup(cluster);
+      cluster = null;
     }
 
     //
@@ -1080,6 +1096,7 @@
     } finally {
       fileSys.close();
       cluster.shutdown();
+      cluster = null;
     }
   }
@@ -1095,7 +1112,7 @@
       Configuration conf = new HdfsConfiguration();
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(true).build();
       cluster.waitActive();
-      fs = (DistributedFileSystem)(cluster.getFileSystem());
+      fs = (cluster.getFileSystem());
       fc = FileContext.getFileContext(cluster.getURI(0));
 
       // Saving image without safe mode should fail
@@ -1177,17 +1194,14 @@
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
       cluster.waitActive();
-      fs = (DistributedFileSystem)(cluster.getFileSystem());
+      fs = (cluster.getFileSystem());
       checkFile(fs, file, replication);
       fc = FileContext.getFileContext(cluster.getURI(0));
       assertTrue(fc.getFileLinkStatus(symlink).isSymlink());
     } finally {
-      try {
-        if(fs != null) fs.close();
-        if(cluster!= null) cluster.shutdown();
-      } catch (Throwable t) {
-        LOG.error("Failed to shutdown", t);
-      }
+      if(fs != null) fs.close();
+      cleanup(cluster);
+      cluster = null;
     }
   }
@@ -1199,26 +1213,31 @@
     MiniDFSCluster cluster = null;
     Configuration conf = new HdfsConfiguration();
 
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
-        .format(true).build();
-    NameNode nn = cluster.getNameNode();
-    NamenodeProtocols nnRpc = nn.getRpcServer();
-
-    SecondaryNameNode secondary = startSecondaryNameNode(conf);
-    // prepare checkpoint image
-    secondary.doCheckpoint();
-    CheckpointSignature sig = nnRpc.rollEditLog();
-    // manipulate the CheckpointSignature fields
-    sig.setBlockpoolID("somerandomebpid");
-    sig.clusterID = "somerandomcid";
+    SecondaryNameNode secondary = null;
     try {
-      sig.validateStorageInfo(nn.getFSImage()); // this should fail
-      assertTrue("This test is expected to fail.", false);
-    } catch (Exception ignored) {
-    }
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+          .format(true).build();
+      NameNode nn = cluster.getNameNode();
+      NamenodeProtocols nnRpc = nn.getRpcServer();
 
-    secondary.shutdown();
-    cluster.shutdown();
+      secondary = startSecondaryNameNode(conf);
+      // prepare checkpoint image
+      secondary.doCheckpoint();
+      CheckpointSignature sig = nnRpc.rollEditLog();
+      // manipulate the CheckpointSignature fields
+      sig.setBlockpoolID("somerandomebpid");
+      sig.clusterID = "somerandomcid";
+      try {
+        sig.validateStorageInfo(nn.getFSImage()); // this should fail
+        assertTrue("This test is expected to fail.", false);
+      } catch (Exception ignored) {
+      }
+    } finally {
+      cleanup(secondary);
+      secondary = null;
+      cleanup(cluster);
+      cluster = null;
+    }
   }
 
   /**
@@ -1269,12 +1288,10 @@
       secondary.doCheckpoint();
     } finally {
-      if (secondary != null) {
-        secondary.shutdown();
-      }
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+      cleanup(secondary);
+      secondary = null;
+      cleanup(cluster);
+      cluster = null;
     }
   }
@@ -1292,44 +1309,57 @@
     String nameserviceId2 = "ns2";
     conf.set(DFSConfigKeys.DFS_NAMESERVICES, nameserviceId1
         + "," + nameserviceId2);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
-        .build();
-    Configuration snConf1 = new HdfsConfiguration(cluster.getConfiguration(0));
-    Configuration snConf2 = new HdfsConfiguration(cluster.getConfiguration(1));
-    InetSocketAddress nn1RpcAddress =
-        cluster.getNameNode(0).getNameNodeAddress();
-    InetSocketAddress nn2RpcAddress =
-        cluster.getNameNode(1).getNameNodeAddress();
-    String nn1 = nn1RpcAddress.getHostName() + ":" + nn1RpcAddress.getPort();
-    String nn2 = nn2RpcAddress.getHostName() + ":" + nn2RpcAddress.getPort();
+    MiniDFSCluster cluster = null;
+    SecondaryNameNode secondary1 = null;
+    SecondaryNameNode secondary2 = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf)
+          .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
+          .build();
+      Configuration snConf1 = new HdfsConfiguration(cluster.getConfiguration(0));
+      Configuration snConf2 = new HdfsConfiguration(cluster.getConfiguration(1));
+      InetSocketAddress nn1RpcAddress = cluster.getNameNode(0)
+          .getNameNodeAddress();
+      InetSocketAddress nn2RpcAddress = cluster.getNameNode(1)
+          .getNameNodeAddress();
+      String nn1 = nn1RpcAddress.getHostName() + ":" + nn1RpcAddress.getPort();
+      String nn2 = nn2RpcAddress.getHostName() + ":" + nn2RpcAddress.getPort();
 
-    // Set the Service Rpc address to empty to make sure the node specific
-    // setting works
-    snConf1.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");
-    snConf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");
+      // Set the Service Rpc address to empty to make sure the node specific
+      // setting works
+      snConf1.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");
+      snConf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");
 
-    // Set the nameserviceIds
-    snConf1.set(DFSUtil.addKeySuffixes(
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId1), nn1);
-    snConf2.set(DFSUtil.addKeySuffixes(
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId2), nn2);
+      // Set the nameserviceIds
+      snConf1.set(DFSUtil.addKeySuffixes(
+          DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId1),
+          nn1);
+      snConf2.set(DFSUtil.addKeySuffixes(
+          DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId2),
+          nn2);
 
-    SecondaryNameNode secondary1 = startSecondaryNameNode(snConf1);
-    SecondaryNameNode secondary2 = startSecondaryNameNode(snConf2);
+      secondary1 = startSecondaryNameNode(snConf1);
+      secondary2 = startSecondaryNameNode(snConf2);
 
-    // make sure the two secondary namenodes are talking to correct namenodes.
-    assertEquals(secondary1.getNameNodeAddress().getPort(), nn1RpcAddress.getPort());
-    assertEquals(secondary2.getNameNodeAddress().getPort(), nn2RpcAddress.getPort());
-    assertTrue(secondary1.getNameNodeAddress().getPort() != secondary2
-        .getNameNodeAddress().getPort());
+      // make sure the two secondary namenodes are talking to correct namenodes.
+      assertEquals(secondary1.getNameNodeAddress().getPort(),
+          nn1RpcAddress.getPort());
+      assertEquals(secondary2.getNameNodeAddress().getPort(),
+          nn2RpcAddress.getPort());
+      assertTrue(secondary1.getNameNodeAddress().getPort() != secondary2
+          .getNameNodeAddress().getPort());
 
-    // both should checkpoint.
-    secondary1.doCheckpoint();
-    secondary2.doCheckpoint();
-    secondary1.shutdown();
-    secondary2.shutdown();
-    cluster.shutdown();
+      // both should checkpoint.
+      secondary1.doCheckpoint();
+      secondary2.doCheckpoint();
+    } finally {
+      cleanup(secondary1);
+      secondary1 = null;
+      cleanup(secondary2);
+      secondary2 = null;
+      cleanup(cluster);
+      cluster = null;
+    }
   }
 
   /**
@@ -1348,12 +1378,13 @@
     cluster.waitActive();
     FileSystem fileSys = cluster.getFileSystem();
     FSImage image = cluster.getNameNode().getFSImage();
+    SecondaryNameNode secondary = null;
     try {
       assertTrue(!fileSys.exists(dir));
       //
       // Make the checkpoint
       //
-      SecondaryNameNode secondary = startSecondaryNameNode(conf);
+      secondary = startSecondaryNameNode(conf);
 
       File secondaryDir = new File(MiniDFSCluster.getBaseDirectory(),
           "namesecondary1");
       File secondaryCurrent = new File(secondaryDir, "current");
@@ -1397,10 +1428,12 @@
         imageFile.length() > fsimageLength);
       }
 
-      secondary.shutdown();
     } finally {
       fileSys.close();
-      cluster.shutdown();
+      cleanup(secondary);
+      secondary = null;
+      cleanup(cluster);
+      cluster = null;
     }
   }
@@ -1415,13 +1448,10 @@
     SecondaryNameNode secondary = null;
     MiniDFSCluster cluster = null;
     FileSystem fs = null;
-    NameNode namenode = null;
-
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
          .build();
       cluster.waitActive();
-      namenode = cluster.getNameNode();
       fs = cluster.getFileSystem();
       secondary = startSecondaryNameNode(conf);
       fos = fs.create(new Path("tmpfile0"));
@@ -1445,15 +1475,13 @@
       // Namenode should still restart successfully
       cluster.restartNameNode();
     } finally {
-      if (secondary != null) {
-        secondary.shutdown();
-      }
       if (fs != null) {
         fs.close();
       }
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+      cleanup(secondary);
+      secondary = null;
+      cleanup(cluster);
+      cluster = null;
       Mockito.reset(faultInjector);
     }
   }
@@ -1476,13 +1504,12 @@
   @Test
   public void testMultipleSecondaryNNsAgainstSameNN() throws Exception {
     Configuration conf = new HdfsConfiguration();
-
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(0)
-        .format(true).build();
-
+    MiniDFSCluster cluster = null;
     SecondaryNameNode secondary1 = null, secondary2 = null;
     try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true)
+          .build();
+
       // Start 2NNs
       secondary1 = startSecondaryNameNode(conf, 1);
       secondary2 = startSecondaryNameNode(conf, 2);
@@ -1525,20 +1552,23 @@
 
       // NN should have received new checkpoint
       assertEquals(6, storage.getMostRecentCheckpointTxId());
+
+      // Validate invariant that files named the same are the same.
+      assertParallelFilesInvariant(cluster, ImmutableList.of(secondary1, secondary2));
+
+      // NN should have removed the checkpoint at txid 2 at this point, but has
+      // one at txid 6
+      assertNNHasCheckpoints(cluster, ImmutableList.of(4,6));
     } finally {
       cleanup(secondary1);
+      secondary1 = null;
      cleanup(secondary2);
+      secondary2 = null;
      if (cluster != null) {
        cluster.shutdown();
+        cluster = null;
      }
    }
-
-    // Validate invariant that files named the same are the same.
-    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary1, secondary2));
-
-    // NN should have removed the checkpoint at txid 2 at this point, but has
-    // one at txid 6
-    assertNNHasCheckpoints(cluster, ImmutableList.of(4,6));
   }
@@ -1562,13 +1592,12 @@
   @Test
   public void testMultipleSecondaryNNsAgainstSameNN2() throws Exception {
     Configuration conf = new HdfsConfiguration();
-
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(0)
-        .format(true).build();
-
+    MiniDFSCluster cluster = null;
     SecondaryNameNode secondary1 = null, secondary2 = null;
     try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true)
+          .build();
+
       // Start 2NNs
       secondary1 = startSecondaryNameNode(conf, 1);
       secondary2 = startSecondaryNameNode(conf, 2);
@@ -1633,19 +1662,20 @@
 
       // NN should have received new checkpoint
       assertEquals(8, storage.getMostRecentCheckpointTxId());
+
+      // Validate invariant that files named the same are the same.
+      assertParallelFilesInvariant(cluster, ImmutableList.of(secondary1, secondary2));
+      // Validate that the NN received checkpoints at expected txids
+      // (i.e that both checkpoints went through)
+      assertNNHasCheckpoints(cluster, ImmutableList.of(6,8));
     } finally {
       cleanup(secondary1);
+      secondary1 = null;
       cleanup(secondary2);
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+      secondary2 = null;
+      cleanup(cluster);
+      cluster = null;
     }
-
-    // Validate invariant that files named the same are the same.
-    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary1, secondary2));
-    // Validate that the NN received checkpoints at expected txids
-    // (i.e that both checkpoints went through)
-    assertNNHasCheckpoints(cluster, ImmutableList.of(6,8));
   }
 
  /**
@@ -1684,11 +1714,9 @@
     }
 
     // Start a new NN with the same host/port.
-    cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(0)
-        .nameNodePort(origPort)
-        .nameNodeHttpPort(origHttpPort)
-        .format(true).build();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+        .nameNodePort(origPort).nameNodeHttpPort(origHttpPort).format(true)
+        .build();
 
     try {
       secondary.doCheckpoint();
@@ -1698,12 +1726,10 @@
       assertTrue(ioe.toString().contains("Inconsistent checkpoint"));
     }
    } finally {
-      if (secondary != null) {
-        secondary.shutdown();
-      }
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+      cleanup(secondary);
+      secondary = null;
+      cleanup(cluster);
+      cluster = null;
    }
  }
@@ -1762,9 +1788,8 @@
         assertTrue(msg, msg.contains("but the secondary expected"));
       }
     } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+      cleanup(cluster);
+      cluster = null;
     }
   }
@@ -1821,12 +1846,10 @@
       if (currentDir != null) {
         currentDir.setExecutable(true);
       }
-      if (secondary != null) {
-        secondary.shutdown();
-      }
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+      cleanup(secondary);
+      secondary = null;
+      cleanup(cluster);
+      cluster = null;
     }
   }
@@ -1856,10 +1879,8 @@
         fileAsURI(new File(base_dir, "namesecondary1")).toString());
 
     try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
-          .format(true)
-          .manageNameDfsDirs(false)
-          .build();
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true)
+          .manageNameDfsDirs(false).build();
 
       secondary = startSecondaryNameNode(conf);
@@ -1899,12 +1920,10 @@
       if (currentDir != null) {
         currentDir.setExecutable(true);
       }
-      if (secondary != null) {
-        secondary.shutdown();
-      }
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+      cleanup(secondary);
+      secondary = null;
+      cleanup(cluster);
+      cluster = null;
     }
   }
@@ -1951,9 +1970,9 @@
       }, 200, 15000);
     } finally {
       cleanup(secondary);
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+      secondary = null;
+      cleanup(cluster);
+      cluster = null;
     }
   }
@@ -1968,7 +1987,6 @@
   public void testSecondaryHasVeryOutOfDateImage() throws IOException {
     MiniDFSCluster cluster = null;
     SecondaryNameNode secondary = null;
-
     Configuration conf = new HdfsConfiguration();
 
     try {
@@ -1993,12 +2011,10 @@
       secondary.doCheckpoint();
     } finally {
-      if (secondary != null) {
-        secondary.shutdown();
-      }
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+      cleanup(secondary);
+      secondary = null;
+      cleanup(cluster);
+      cluster = null;
     }
   }
@@ -2009,12 +2025,11 @@
   public void testSecondaryPurgesEditLogs() throws IOException {
     MiniDFSCluster cluster = null;
     SecondaryNameNode secondary = null;
-
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);
 
     try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
-          .format(true).build();
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true)
+          .build();
 
       FileSystem fs = cluster.getFileSystem();
       fs.mkdirs(new Path("/foo"));
@@ -2037,12 +2052,10 @@
       }
 
     } finally {
-      if (secondary != null) {
-        secondary.shutdown();
-      }
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+      cleanup(secondary);
+      secondary = null;
+      cleanup(cluster);
+      cluster = null;
     }
   }
@@ -2080,12 +2093,10 @@
       // Ensure that the 2NN can still perform a checkpoint.
       secondary.doCheckpoint();
     } finally {
-      if (secondary != null) {
-        secondary.shutdown();
-      }
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+      cleanup(secondary);
+      secondary = null;
+      cleanup(cluster);
+      cluster = null;
     }
   }
@@ -2133,12 +2144,10 @@
       if (fos != null) {
         fos.close();
       }
-      if (secondary != null) {
-        secondary.shutdown();
-      }
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+      cleanup(secondary);
+      secondary = null;
+      cleanup(cluster);
+      cluster = null;
     }
   }
@@ -2169,15 +2178,19 @@
     try {
       opts.parse("-geteditsize", "-checkpoint");
       fail("Should have failed bad parsing for two actions");
-    } catch (ParseException e) {}
+    } catch (ParseException e) {
+      LOG.warn("Encountered ", e);
+    }
 
     try {
       opts.parse("-checkpoint", "xx");
       fail("Should have failed for bad checkpoint arg");
-    } catch (ParseException e) {}
+    } catch (ParseException e) {
+      LOG.warn("Encountered ", e);
+    }
   }
 
-  private void cleanup(SecondaryNameNode snn) {
+  private static void cleanup(SecondaryNameNode snn) {
     if (snn != null) {
       try {
         snn.shutdown();
@@ -2187,6 +2200,15 @@
     }
   }
 
+  private static void cleanup(MiniDFSCluster cluster) {
+    if (cluster != null) {
+      try {
+        cluster.shutdown();
+      } catch (Exception e) {
+        LOG.warn("Could not shutdown MiniDFSCluster ", e);
+      }
+    }
+  }
 
  /**
   * Assert that if any two files have the same name across the 2NNs
@@ -2246,3 +2268,4 @@
     }
   }
 }
+
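
Editor's note (not part of the patch): the cleanup idiom this change applies throughout
TestCheckpoint can be summarized in one place. The sketch below is illustrative only --
the test method name is hypothetical -- but it uses only constructs the patch itself
introduces: resources are declared null before the try block, assigned inside it, and
released through the null-tolerant static cleanup() helpers, so a failure at any point
still tears down exactly what was started, and a teardown error is logged rather than
masking the original test failure.

  // Illustrative sketch, assuming the cleanup(SecondaryNameNode) and
  // cleanup(MiniDFSCluster) helpers added by this patch.
  @Test
  public void testCheckpointCleanupSketch() throws IOException {  // hypothetical name
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    FileSystem fileSys = null;
    SecondaryNameNode secondary = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
      cluster.waitActive();
      fileSys = cluster.getFileSystem();
      secondary = startSecondaryNameNode(conf);
      secondary.doCheckpoint();
    } finally {
      if (fileSys != null) {
        fileSys.close();
      }
      cleanup(secondary);  // no-op if startSecondaryNameNode() never ran
      secondary = null;
      cleanup(cluster);    // no-op if the builder threw before assignment
      cluster = null;
    }
  }

Nulling the fields after cleanup() mirrors what the patch does in SecondaryNameNode.shutDown()
itself (infoServer, checkpointImage, namesystem), so a repeated shutdown is harmless.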
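
A second recurring pattern worth noting is the Mockito fault injection used by the
testSecondaryNamenodeError* cases. The hunks above trim the .when(...) continuation
lines, so the exact injection-point method shown here -- afterSecondaryCallsRollEditLog()
on CheckpointFaultInjector -- is an assumption; the surrounding shape is taken directly
from the patch:

  // Hypothetical condensed form of the fault-injection pattern; the
  // injection-point method name is an assumption, not confirmed by the hunks.
  Mockito.doThrow(new IOException("Injecting failure after rolling edit logs"))
      .when(faultInjector).afterSecondaryCallsRollEditLog();
  try {
    secondary.doCheckpoint();  // the injected fault should surface here
    fail("checkpoint was expected to fail");
  } catch (IOException e) {
    // expected
  }
  Mockito.reset(faultInjector);  // restore normal behavior for later checkpoints

Resetting the mock in finally blocks, as the patch does, keeps one test's injected
fault from leaking into the next test's checkpoint.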