From 6c50341ea88320dccdc861ae4f281adb67aec05c Mon Sep 17 00:00:00 2001
From: Wei-Chiu Chuang
Date: Wed, 24 May 2017 18:11:42 -0700
Subject: [PATCH] Revert "HDFS-11515. -du throws
 ConcurrentModificationException. Contributed by Istvan Fajth, Wei-Chiu
 Chuang."

This reverts commit f9d3bb3b8335889b30691baca4331f6f9ed28f69.
---
 .../DirectoryWithSnapshotFeature.java     |  5 --
 .../snapshot/TestRenameWithSnapshots.java |  6 +-
 .../snapshot/TestSnapshotDeletion.java    | 75 -------------------
 3 files changed, 2 insertions(+), 84 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 984067990ca..9addbfa7ace 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -633,11 +633,6 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
     for(DirectoryDiff d : diffs) {
       for(INode deletedNode : d.getChildrenDiff().getList(ListType.DELETED)) {
         context.reportDeletedSnapshottedNode(deletedNode);
-        if (deletedNode.isDirectory()){
-          DirectoryWithSnapshotFeature sf =
-              deletedNode.asDirectory().getDirectoryWithSnapshotFeature();
-          sf.computeContentSummary4Snapshot(context);
-        }
       }
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index 9b5c0b0835d..143a7b6ac81 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -26,7 +26,6 @@ import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.anyObject;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.spy;
-import static org.apache.hadoop.test.GenericTestUtils.getTestDir;
 
 import java.io.File;
 import java.io.IOException;
@@ -2428,7 +2427,7 @@ public class TestRenameWithSnapshots {
    */
   @Test (timeout=300000)
   public void testDu() throws Exception {
-    File tempFile = File.createTempFile("testDu-", ".tmp", getTestDir());
+    File tempFile = File.createTempFile("testDu-", ".tmp");
     tempFile.deleteOnExit();
 
     final FileSystem localfs = FileSystem.getLocal(conf);
@@ -2538,8 +2537,7 @@ public class TestRenameWithSnapshots {
    */
   @Test (timeout=300000)
   public void testDuMultipleDirs() throws Exception {
-    File tempFile = File.createTempFile("testDuMultipleDirs-", ".tmp",
-        getTestDir());
+    File tempFile = File.createTempFile("testDuMultipleDirs-", "" + ".tmp");
     tempFile.deleteOnExit();
 
     final FileSystem localfs = FileSystem.getLocal(conf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index 7926e44d0cf..ca53788e98d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -27,7 +27,6 @@ import java.io.PrintStream;
 import java.security.PrivilegedAction;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
@@ -1233,78 +1232,4 @@ public class TestSnapshotDeletion {
     // make sure bar has been cleaned from inodeMap
     Assert.assertNull(fsdir.getInode(fileId));
   }
-
-  /**
-   * Test for HDFS-11515.
-   * In a scenario where a directory with subdirectories is removed from the
-   * file system after taking a snapshot on one of its ancestors, du command
-   * fails with a ConcurrentModificationException until a new snapshot is taken,
-   * or the old snapshots are removed.
-   * This test is testing this scenario with checks on the space consumed
-   * calculation.
-   *
-   * @throws Exception
-   */
-  @Test(timeout = 180000)
-  public void testDuWithRmdirInSnapshots() throws Exception {
-    final Path parent = new Path("/testDuWithRmdirInSnapshots");
-    final Path snapshotDir = new Path(parent, "snapshotDir");
-    final Path dir1 = new Path(snapshotDir, "d1"); //snapshotDir/d1
-    final Path dir2 = new Path(snapshotDir, "d2"); //snapshotDir/d2
-    final Path dir4 = new Path(dir2, "d4"); //snapshotDir/d2/d4
-    final Path dir3 = new Path(snapshotDir, "d3"); //snapshotDir/d3
-    final Path dir5 = new Path(dir3, "d5"); //snapshotDir/d3/d5
-    final Path aFileOutsideSnapshots = new Path(parent, "aFile");
-    final Path aFileInsideSnapshots = new Path(dir5, "aFile");
-
-    final String snapshotName = "s1";
-    final String snapshotName2 = "s2";
-
-    final long spaceConsumed = BLOCKSIZE * REPLICATION;
-    final long spaceConsumed2 = 2 * spaceConsumed;
-    ContentSummary summary = null;
-
-    DFSTestUtil.createFile(hdfs, aFileOutsideSnapshots,
-        BLOCKSIZE, REPLICATION, 0);
-    summary = hdfs.getContentSummary(parent);
-    assertEquals("Du is wrong even with one file without further ado.",
-        spaceConsumed, summary.getSpaceConsumed());
-
-    hdfs.mkdirs(snapshotDir);
-    hdfs.allowSnapshot(snapshotDir);
-    hdfs.mkdirs(dir1);
-
-    hdfs.createSnapshot(snapshotDir, snapshotName);
-
-    hdfs.mkdirs(dir4);
-    hdfs.mkdirs(dir5);
-    DFSTestUtil.createFile(hdfs, aFileInsideSnapshots,
-        BLOCKSIZE, REPLICATION, 0);
-    summary = hdfs.getContentSummary(parent);
-    assertEquals("Du is wrong with 2 files added to the file system.",
-        spaceConsumed2, summary.getSpaceConsumed());
-
-    hdfs.createSnapshot(snapshotDir, snapshotName2);
-
-    hdfs.delete(dir2, true);
-    hdfs.delete(dir3, true);
-
-    summary = hdfs.getContentSummary(parent);
-    assertEquals("Snapshot file count is not matching expected value.",
-        1, summary.getSnapshotFileCount());
-    assertEquals("Snapshot directory count is not matching expected value.",
-        4, summary.getSnapshotDirectoryCount());
-    assertEquals("Consumed space does not matching expected value.",
-        spaceConsumed, summary.getSnapshotSpaceConsumed());
-    assertEquals("Snapshot length is not matching expected value.",
-        BLOCKSIZE, summary.getSnapshotLength());
-    assertEquals("File count is not matching expected value.",
-        2, summary.getFileCount());
-    assertEquals("Directory count is not matching expected value.",
-        7, summary.getDirectoryCount());
-    assertEquals("Consumed space is not matching expected value.",
-        spaceConsumed2, summary.getSpaceConsumed());
-    assertEquals("Length is not matching expected value.",
-        2 * BLOCKSIZE, summary.getLength());
-  }
 }
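
For context on the reverted change (trailing text after the last hunk is
ignored by git am): the hunk removed from DirectoryWithSnapshotFeature.java
had made computeContentSummary4Snapshot() recurse into the snapshot feature
of every deleted child directory, so that du counts subtrees that remain
only in snapshots. A minimal sketch of that recursion follows; the null
guard is an assumption of mine and is not part of this patch, added because
getDirectoryWithSnapshotFeature() returns null for a directory that carries
no snapshot feature, while the reverted hunk dereferenced it directly:

    for (DirectoryDiff d : diffs) {
      for (INode deletedNode :
          d.getChildrenDiff().getList(ListType.DELETED)) {
        // Report the deleted node itself toward the snapshot totals.
        context.reportDeletedSnapshottedNode(deletedNode);
        if (deletedNode.isDirectory()) {
          // Hypothetical hardening: may be null when the deleted directory
          // never recorded snapshot data.
          DirectoryWithSnapshotFeature sf =
              deletedNode.asDirectory().getDirectoryWithSnapshotFeature();
          if (sf != null) {
            sf.computeContentSummary4Snapshot(context);
          }
        }
      }
    }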