From c32614f410fb62a7179abfefbab42a05415a3066 Mon Sep 17 00:00:00 2001
From: Andrew Wang
Date: Fri, 9 Oct 2015 11:57:03 -0700
Subject: [PATCH] HDFS-8941. DistributedFileSystem listCorruptFileBlocks API
 should resolve relative path. Contributed by Rakesh R.

---
 .../hadoop/hdfs/DistributedFileSystem.java    | 17 ++++-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +
 .../namenode/TestListCorruptFileBlocks.java   | 68 +++++++++++++++++++
 3 files changed, 86 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 512c9c11cb4..8ed892c2742 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -1112,9 +1112,22 @@ public long getCorruptBlocksCount() throws IOException {
   }
 
   @Override
-  public RemoteIterator<Path> listCorruptFileBlocks(Path path)
+  public RemoteIterator<Path> listCorruptFileBlocks(final Path path)
       throws IOException {
-    return new CorruptFileBlockIterator(dfs, path);
+    Path absF = fixRelativePart(path);
+    return new FileSystemLinkResolver<RemoteIterator<Path>>() {
+      @Override
+      public RemoteIterator<Path> doCall(final Path path) throws IOException,
+          UnresolvedLinkException {
+        return new CorruptFileBlockIterator(dfs, path);
+      }
+
+      @Override
+      public RemoteIterator<Path> next(final FileSystem fs, final Path path)
+          throws IOException {
+        return fs.listCorruptFileBlocks(path);
+      }
+    }.resolve(this, absF);
   }
 
   /** @return datanode statistics. */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 24c359b9ade..9d73776d049 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2004,6 +2004,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9142. Separating Configuration object for namenode(s) in
     MiniDFSCluster. (Siqi Li via mingma)
 
+    HDFS-8941. DistributedFileSystem listCorruptFileBlocks API should
+    resolve relative path. (Rakesh R via wang)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
index 99dce1d0371..0b273dfa0ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
@@ -530,4 +530,72 @@ public void testMaxCorruptFiles() throws Exception {
     }
   }
 
+  @Test(timeout = 60000)
+  public void testListCorruptFileBlocksOnRelativePath() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
+    conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
+
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).build();
+      cluster.waitActive();
+      FileSystem fs = cluster.getFileSystem();
+      DistributedFileSystem dfs = (DistributedFileSystem) fs;
+      final Path baseDir = new Path("/somewhere/base");
+      fs.mkdirs(baseDir);
+      // set working dir
+      fs.setWorkingDirectory(baseDir);
+
+      DFSTestUtil util = new DFSTestUtil.Builder()
+          .setName("testGetCorruptFilesOnRelativePath").setNumFiles(3)
+          .setMaxLevels(1).setMaxSize(1024).build();
+      util.createFiles(fs, "corruptData");
+
+      RemoteIterator<Path> corruptFileBlocks = dfs
+          .listCorruptFileBlocks(new Path("corruptData"));
+      int numCorrupt = countPaths(corruptFileBlocks);
+      assertTrue(numCorrupt == 0);
+
+      // delete the blocks
+      String bpid = cluster.getNamesystem().getBlockPoolId();
+      // For loop through number of data directories per datanode (2)
+      for (int i = 0; i < 2; i++) {
+        File storageDir = cluster.getInstanceStorageDir(0, i);
+        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
+        List<File> metadataFiles = MiniDFSCluster
+            .getAllBlockMetadataFiles(data_dir);
+        if (metadataFiles == null)
+          continue;
+        for (File metadataFile : metadataFiles) {
+          File blockFile = Block.metaToBlockFile(metadataFile);
+          LOG.info("Deliberately removing file " + blockFile.getName());
+          assertTrue("Cannot remove file.", blockFile.delete());
+          LOG.info("Deliberately removing file " + metadataFile.getName());
+          assertTrue("Cannot remove file.", metadataFile.delete());
+        }
+      }
+
+      int count = 0;
+      corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("corruptData"));
+      numCorrupt = countPaths(corruptFileBlocks);
+      while (numCorrupt < 3) {
+        Thread.sleep(1000);
+        corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("corruptData"));
+        numCorrupt = countPaths(corruptFileBlocks);
+        count++;
+        if (count > 30)
+          break;
+      }
+      // Validate we get all the corrupt files
+      LOG.info("Namenode has bad files. " + numCorrupt);
+      assertTrue("Failed to get corrupt files!", numCorrupt == 3);
+
+      util.cleanup(fs, "corruptData");
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }