From a6250a4943d90c10bcfe9a2a46d6558c6d1a2d50 Mon Sep 17 00:00:00 2001
From: Jing Zhao
Date: Tue, 5 Nov 2013 06:38:50 +0000
Subject: [PATCH] HDFS-5427. Not able to read deleted files from snapshot
 directly under snapshottable dir after checkpoint and NN restart. Contributed
 by Vinay.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1538875 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hdfs/server/namenode/FSImageFormat.java |  7 +-
 .../snapshot/SnapshotFSImageFormat.java     |  3 +
 .../snapshot/TestSnapshotBlocksMap.java     | 69 +++++++++++++++++++
 4 files changed, 80 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 29a529022b4..5c6d9b05fe3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -495,6 +495,9 @@ Release 2.3.0 - UNRELEASED
     HDFS-5257. addBlock() retry should return LocatedBlock with locations else
     client will get AIOBE. (Vinay via jing9)
 
+    HDFS-5427. Not able to read deleted files from snapshot directly under
+    snapshottable dir after checkpoint and NN restart. (Vinay via jing9)
+
 Release 2.2.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index 34659fbc7cb..2746c7872b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -588,8 +588,12 @@ public class FSImageFormat {
     namesystem.dir.cacheName(child);
 
     if (child.isFile()) {
+      updateBlocksMap(child.asFile());
+    }
+  }
+
+  public void updateBlocksMap(INodeFile file) {
       // Add file->block mapping
-      final INodeFile file = child.asFile();
       final BlockInfo[] blocks = file.getBlocks();
       if (blocks != null) {
         final BlockManager bm = namesystem.getBlockManager();
@@ -598,7 +602,6 @@ public class FSImageFormat {
         }
       }
     }
-  }
 
   /** @return The FSDirectory of the namesystem where the fsimage is loaded */
   public FSDirectory getFSDirectoryInLoading() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
index 3a46023ab1b..01233dd7eca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
@@ -203,6 +203,9 @@ public class SnapshotFSImageFormat {
       // useful, but set the parent here to be consistent with the original
       // fsdir tree.
       deleted.setParent(parent);
+      if (deleted.isFile()) {
+        loader.updateBlocksMap(deleted.asFile());
+      }
     }
     return deletedList;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
index 91f28ade185..9bf8c434fb7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
@@ -21,6 +21,7 @@ import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
@@ -36,6 +37,8 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -205,4 +208,70 @@ public class TestSnapshotBlocksMap {
       assertExceptionContains("File does not exist: " + s1f0, e);
     }
   }
+
+  /*
+   * Try to read the files inside snapshot but deleted in original place after
+   * restarting post checkpoint. refer HDFS-5427
+   */
+  @Test(timeout = 30000)
+  public void testReadSnapshotFileWithCheckpoint() throws Exception {
+    Path foo = new Path("/foo");
+    hdfs.mkdirs(foo);
+    hdfs.allowSnapshot(foo);
+    Path bar = new Path("/foo/bar");
+    DFSTestUtil.createFile(hdfs, bar, 100, (short) 2, 100024L);
+    hdfs.createSnapshot(foo, "s1");
+    assertTrue(hdfs.delete(bar, true));
+
+    // checkpoint
+    NameNode nameNode = cluster.getNameNode();
+    NameNodeAdapter.enterSafeMode(nameNode, false);
+    NameNodeAdapter.saveNamespace(nameNode);
+    NameNodeAdapter.leaveSafeMode(nameNode);
+
+    // restart namenode to load snapshot files from fsimage
+    cluster.restartNameNode(true);
+    String snapshotPath = Snapshot.getSnapshotPath(foo.toString(), "s1/bar");
+    DFSTestUtil.readFile(hdfs, new Path(snapshotPath));
+  }
+
+  /*
+   * Try to read the files inside snapshot but renamed to different file and
+   * deleted after restarting post checkpoint. refer HDFS-5427
+   */
+  @Test(timeout = 30000)
+  public void testReadRenamedSnapshotFileWithCheckpoint() throws Exception {
+    final Path foo = new Path("/foo");
+    final Path foo2 = new Path("/foo2");
+    hdfs.mkdirs(foo);
+    hdfs.mkdirs(foo2);
+
+    hdfs.allowSnapshot(foo);
+    hdfs.allowSnapshot(foo2);
+    final Path bar = new Path(foo, "bar");
+    final Path bar2 = new Path(foo2, "bar");
+    DFSTestUtil.createFile(hdfs, bar, 100, (short) 2, 100024L);
+    hdfs.createSnapshot(foo, "s1");
+    // rename to another snapshottable directory and take snapshot
+    assertTrue(hdfs.rename(bar, bar2));
+    hdfs.createSnapshot(foo2, "s2");
+    // delete the original renamed file to make sure blocks are not updated by
+    // the original file
+    assertTrue(hdfs.delete(bar2, true));
+
+    // checkpoint
+    NameNode nameNode = cluster.getNameNode();
+    NameNodeAdapter.enterSafeMode(nameNode, false);
+    NameNodeAdapter.saveNamespace(nameNode);
+    NameNodeAdapter.leaveSafeMode(nameNode);
+    // restart namenode to load snapshot files from fsimage
+    cluster.restartNameNode(true);
+    // file in first snapshot
+    String barSnapshotPath = Snapshot.getSnapshotPath(foo.toString(), "s1/bar");
+    DFSTestUtil.readFile(hdfs, new Path(barSnapshotPath));
+    // file in second snapshot after rename+delete
+    String bar2SnapshotPath = Snapshot.getSnapshotPath(foo2.toString(),
+        "s2/bar");
+    DFSTestUtil.readFile(hdfs, new Path(bar2SnapshotPath));
+  }
 }
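
For reviewers: the root cause is that FSImageFormat's loader registered
file-to-block mappings in the BlockManager only for INodeFiles reachable
through the active directory tree, so a file surviving only in a snapshot's
deleted list came back from the fsimage with its blocks missing from the
blocks map, and reads through the .snapshot path failed after a checkpoint
plus NameNode restart. The patch factors that registration out into
updateBlocksMap(INodeFile) and also calls it from the snapshot deleted-list
loader, so every INodeFile materialized from the fsimage registers its
blocks. The sketch below is a minimal client-side illustration of the
symptom, assuming fs.defaultFS points at a running HDFS cluster and the
caller has superuser privilege; the class name Hdfs5427Sketch, the paths,
the data, and the snapshot name are hypothetical, chosen to mirror the new
test rather than taken from the patch.

    // Hdfs5427Sketch: hypothetical, minimal client-side repro of the
    // symptom fixed above. Assumes fs.defaultFS points at a running HDFS
    // cluster and the caller is a superuser (needed for allowSnapshot).
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class Hdfs5427Sketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem hdfs =
            (DistributedFileSystem) FileSystem.get(conf);

        Path foo = new Path("/foo");
        Path bar = new Path(foo, "bar");
        hdfs.mkdirs(foo);
        hdfs.allowSnapshot(foo);
        try (FSDataOutputStream out = hdfs.create(bar)) {
          out.writeBytes("some data");   // file gets at least one block
        }
        hdfs.createSnapshot(foo, "s1");
        hdfs.delete(bar, true);          // file now exists only in the snapshot

        // Admin steps not shown: enter safe mode, save the namespace
        // (checkpoint), and restart the NameNode. Without the fix, the
        // restarted NameNode rebuilt its blocks map without this file's
        // blocks, so the read below failed; with the fix it succeeds.
        try (FSDataInputStream in =
            hdfs.open(new Path("/foo/.snapshot/s1/bar"))) {
          System.out.println((char) in.read());   // prints 's'
        }
      }
    }

Routing both the regular child-load path and the snapshot deleted-list path
through the single updateBlocksMap(INodeFile) helper keeps block
registration in one place, which is why the two new tests (plain delete,
and rename across snapshottable directories plus delete) are covered by the
same change.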