From c59e7456304def867afe35d6ee009ab572f8fdeb Mon Sep 17 00:00:00 2001 From: Vinayakumar B Date: Wed, 3 Jun 2015 15:30:40 +0530 Subject: [PATCH] HDFS-3716. Purger should remove stale fsimage ckpt files (Contributed by J.Andreina) --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../hadoop/hdfs/server/namenode/FSImage.java | 1 + .../hdfs/server/namenode/TestFSImage.java | 41 +++++++++++++++++++ 3 files changed, 45 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 3e251290f82..d65e5132b29 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -843,6 +843,9 @@ Release 2.8.0 - UNRELEASED HDFS-8470. fsimage loading progress should update inode, delegation token and cache pool count. (surendra singh lilhore via vinayakumarb) + HDFS-3716. Purger should remove stale fsimage ckpt files + (J.Andreina via vinayakumarb) + Release 2.7.1 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index 45184e7aa00..cd7cf18b19e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -1209,6 +1209,7 @@ public class FSImage implements Closeable { // Since we now have a new checkpoint, we can clean up some // old edit logs and checkpoints. purgeOldStorage(nnf); + archivalManager.purgeCheckpoints(NameNodeFile.IMAGE_NEW); } finally { // Notify any threads waiting on the checkpoint to be canceled // that it is complete. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java index 27a1bd3a9ff..df20fd62d49 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import java.io.File; @@ -43,6 +44,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease; +import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.hdfs.util.MD5FileUtils; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.PathUtils; @@ -118,6 +120,45 @@ public class TestFSImage { } } + /** + * On checkpointing, stale fsimage checkpoint file should be deleted. + */ + @Test + public void testRemovalStaleFsimageCkpt() throws IOException { + MiniDFSCluster cluster = null; + SecondaryNameNode secondary = null; + Configuration conf = new HdfsConfiguration(); + try { + cluster = new MiniDFSCluster.Builder(conf). + numDataNodes(1).format(true).build(); + conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, + "0.0.0.0:0"); + secondary = new SecondaryNameNode(conf); + // Do checkpointing + secondary.doCheckpoint(); + NNStorage storage = secondary.getFSImage().storage; + File currentDir = FSImageTestUtil. 
+ getCurrentDirs(storage, NameNodeDirType.IMAGE).get(0); + // Create a stale fsimage.ckpt file + File staleCkptFile = new File(currentDir.getPath() + + "/fsimage.ckpt_0000000000000000002"); + staleCkptFile.createNewFile(); + assertTrue(staleCkptFile.exists()); + // After checkpoint stale fsimage.ckpt file should be deleted + secondary.doCheckpoint(); + assertFalse(staleCkptFile.exists()); + } finally { + if (secondary != null) { + secondary.shutdown(); + secondary = null; + } + if (cluster != null) { + cluster.shutdown(); + cluster = null; + } + } + } + /** * Ensure that the digest written by the saver equals to the digest of the * file.