diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 65bae09ca3a..dbf5539f3e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -649,6 +649,8 @@ Release 2.2.1 - UNRELEASED
     HDFS-5544. Adding Test case For Checking dfs.checksum type as NULL value.
     (Sathish via umamahesh)
 
+    HDFS-5568. Support includeSnapshots option with Fsck command. (Vinayakumar B via umamahesh)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index b933387a31b..7ed77585853 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -36,6 +36,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.BlockReaderFactory;
@@ -46,9 +47,11 @@ import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -139,6 +142,7 @@ public class NamenodeFsck {
 
   private final Configuration conf;
   private final PrintWriter out;
+  private List<String> snapshottableDirs = null;
 
   /**
    * Filesystem checker.
@@ -178,6 +182,8 @@
       }
       else if (key.equals("startblockafter")) {
         this.currentCookie[0] = pmap.get("startblockafter")[0];
+      } else if (key.equals("includeSnapshots")) {
+        this.snapshottableDirs = new ArrayList<String>();
       }
     }
   }
@@ -194,6 +200,16 @@
       out.println(msg);
       namenode.getNamesystem().logFsckEvent(path, remoteAddress);
 
+      if (snapshottableDirs != null) {
+        SnapshottableDirectoryStatus[] snapshotDirs = namenode.getRpcServer()
+            .getSnapshottableDirListing();
+        if (snapshotDirs != null) {
+          for (SnapshottableDirectoryStatus dir : snapshotDirs) {
+            snapshottableDirs.add(dir.getFullPath().toString());
+          }
+        }
+      }
+
       final HdfsFileStatus file = namenode.getRpcServer().getFileInfo(path);
       if (file != null) {
 
@@ -272,6 +288,14 @@
     boolean isOpen = false;
 
     if (file.isDir()) {
+      if (snapshottableDirs != null && snapshottableDirs.contains(path)) {
+        String snapshotPath = (path.endsWith(Path.SEPARATOR) ? path : path
+            + Path.SEPARATOR)
+            + HdfsConstants.DOT_SNAPSHOT_DIR;
+        HdfsFileStatus snapshotFileInfo = namenode.getRpcServer().getFileInfo(
+            snapshotPath);
+        check(snapshotPath, snapshotFileInfo, res);
+      }
       byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;
       DirectoryListing thisListing;
       if (showFiles) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
index 3b846c3a397..fd7fe067806 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
@@ -83,15 +83,23 @@ public class DFSck extends Configured implements Tool {
       + "\t-delete\tdelete corrupted files\n"
       + "\t-files\tprint out files being checked\n"
       + "\t-openforwrite\tprint out files opened for write\n"
+      + "\t-includeSnapshots\tinclude snapshot data if the given path"
+      + " indicates a snapshottable directory or there are "
+      + "snapshottable directories under it\n"
       + "\t-list-corruptfileblocks\tprint out list of missing "
       + "blocks and files they belong to\n"
       + "\t-blocks\tprint out block report\n"
       + "\t-locations\tprint out locations for every block\n"
-      + "\t-racks\tprint out network topology for data-node locations\n"
-      + "\t\tBy default fsck ignores files opened for write, "
+      + "\t-racks\tprint out network topology for data-node locations\n\n"
+      + "Please Note:\n"
+      + "\t1. By default fsck ignores files opened for write, "
       + "use -openforwrite to report such files. They are usually "
       + " tagged CORRUPT or HEALTHY depending on their block "
-      + "allocation status";
+      + "allocation status\n"
+      + "\t2. Option -includeSnapshots should not be used for comparing stats,"
+      + " should be used only for HEALTH check, as this may contain duplicates"
+      + " if the same file present in both original fs tree "
+      + "and inside snapshots.";
 
   private final UserGroupInformation ugi;
   private final PrintStream out;
@@ -266,6 +274,8 @@
       else if (args[idx].equals("-list-corruptfileblocks")) {
         url.append("&listcorruptfileblocks=1");
         doListCorruptFileBlocks = true;
+      } else if (args[idx].equals("-includeSnapshots")) {
+        url.append("&includeSnapshots=1");
       } else if (!args[idx].startsWith("-")) {
         if (null == dir) {
           dir = args[idx];
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index bcebce4e201..a6dd4fea1b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -1058,4 +1058,31 @@ public class TestFsck {
       if (cluster != null) { cluster.shutdown(); }
     }
   }
+
+  /**
+   * Test for including the snapshot files in fsck report
+   */
+  @Test
+  public void testFsckForSnapshotFiles() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .build();
+    try {
+      String runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots",
+          "-files");
+      assertTrue(runFsck.contains("HEALTHY"));
+      final String fileName = "/srcdat";
+      DistributedFileSystem hdfs = cluster.getFileSystem();
+      Path file1 = new Path(fileName);
+      DFSTestUtil.createFile(hdfs, file1, 1024, (short) 1, 1000L);
+      hdfs.allowSnapshot(new Path("/"));
+      hdfs.createSnapshot(new Path("/"), "mySnapShot");
+      runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots", "-files");
+      assertTrue(runFsck.contains("/.snapshot/mySnapShot/srcdat"));
+      runFsck = runFsck(conf, 0, true, "/", "-files");
+      assertFalse(runFsck.contains("mySnapShot"));
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
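
Note on usage (not part of the patch): once applied, the new flag is passed to fsck on the command line, e.g. "hdfs fsck / -includeSnapshots -files", which makes the NameNode-side checker also descend into each snapshottable directory's ".snapshot" subtree, as exercised by testFsckForSnapshotFiles above.

For illustration only, the following is a minimal standalone sketch of the two steps the patched NamenodeFsck performs: fetch the snapshottable-directory listing and derive the ".snapshot" path for each entry. It uses the public DistributedFileSystem API rather than the NameNode RPC server the patch calls, it assumes fs.defaultFS in the loaded configuration points at an HDFS cluster, and the class name ListSnapshotPaths is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;

public class ListSnapshotPaths {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at the target HDFS cluster.
    Configuration conf = new Configuration();
    DistributedFileSystem dfs =
        (DistributedFileSystem) new Path("/").getFileSystem(conf);

    // Step 1: the same listing the patch obtains through
    // namenode.getRpcServer().getSnapshottableDirListing().
    SnapshottableDirectoryStatus[] dirs = dfs.getSnapshottableDirListing();
    if (dirs == null) {
      return; // no snapshottable directories, nothing extra to check
    }

    for (SnapshottableDirectoryStatus dir : dirs) {
      String path = dir.getFullPath().toString();
      // Step 2: mirror the patch's path construction, appending
      // ".snapshot" without doubling the separator for the root "/".
      String snapshotPath = (path.endsWith(Path.SEPARATOR) ? path
          : path + Path.SEPARATOR) + HdfsConstants.DOT_SNAPSHOT_DIR;
      System.out.println(snapshotPath);
    }
  }
}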