diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
index 8433bbacd35..a1bfe0f24bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
@@ -269,3 +269,6 @@ Branch-2802 Snapshot (Unreleased)
 
   HDFS-4719. Remove AbstractINodeDiff.Factory and move its methods to
   AbstractINodeDiffList. (Arpit Agarwal via szetszwo)
+
+  HDFS-4735. DisallowSnapshot throws IllegalStateException for nested
+  snapshottable directories. (Jing Zhao via szetszwo)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 6a530d665e1..462282d1cd3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -5829,8 +5829,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       writeUnlock();
     }
     getEditLog().logSync();
-
-    //TODO: need to update metrics in corresponding SnapshotManager method
 
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
       logAuditEvent(true, "allowSnapshot", path, null, null);
@@ -5855,8 +5853,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       writeUnlock();
     }
     getEditLog().logSync();
-
-    //TODO: need to update metrics in corresponding SnapshotManager method
 
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
       logAuditEvent(true, "disallowSnapshot", path, null, null);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index ec6a24bb0b9..c281667bafa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -173,8 +173,6 @@ public class INodeDirectory extends INodeWithAdditionalFields {
 
   /** Replace itself with an {@link INodeDirectoryWithSnapshot}. */
   public INodeDirectoryWithSnapshot replaceSelf4INodeDirectoryWithSnapshot() {
-    Preconditions.checkState(!(this instanceof INodeDirectoryWithSnapshot),
-        "this is already an INodeDirectoryWithSnapshot, this=%s", this);
     return replaceSelf(new INodeDirectoryWithSnapshot(this));
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java
index dee082e7005..c79dacc99f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java
@@ -271,7 +271,7 @@ public class Diff<K, E extends Diff.Element<K>> {
         // Case 1.1.3 and 2.3.3: element is already in c-list,
         previous = created.set(c, newElement);
 
-        //TODO: fix a bug that previous != oldElement.Set it to oldElement for now
+        // For previous != oldElement, set it to oldElement
         previous = oldElement;
       } else {
         d = search(deleted, oldElement.getKey());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
index b5b969f9e53..2150cef7fbb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
@@ -173,7 +173,7 @@ public class TestFSImageWithSnapshot {
    * 6. Dump the FSDirectory again and compare the two dumped string.
    * </pre>
    */
-  @Test (timeout=60000)
+  @Test
   public void testSaveLoadImage() throws Exception {
     int s = 0;
     // make changes to the namesystem
@@ -213,8 +213,9 @@ public class TestFSImageWithSnapshot {
     hdfs.rename(sub2file2, sub1_sub2file2);
 
     hdfs.rename(sub1file1, sub2file1);
-    // TODO: fix case hdfs.rename(sub1file1, sub1file2);
-
+    checkImage(s);
+
+    hdfs.rename(sub2file1, sub2file2);
     checkImage(s);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestDisallowModifyROSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestDisallowModifyROSnapshot.java
index f279c467152..b0ff58b384f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestDisallowModifyROSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestDisallowModifyROSnapshot.java
@@ -148,8 +148,6 @@ public class TestDisallowModifyROSnapshot {
   public void testCreateSymlink() throws Exception {
     @SuppressWarnings("deprecation")
     DFSClient dfsclient = new DFSClient(conf);
-    // TODO: if link is objInSnapshot, ParentNotDirectoryException got thrown
-    // first by verifyParentDir()
     dfsclient.createSymlink(sub2.toString(), "/TestSnapshot/sub1/.snapshot",
         false);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
index 58415a73b37..b3a616bd46d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable.SNAPSHOT_LIMIT;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.Random;
@@ -34,11 +35,13 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
+import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.ipc.RemoteException;
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Assert;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.Test;
 
 /** Testing nested snapshots. */
@@ -57,16 +60,16 @@ public class TestNestedSnapshots {
   private static MiniDFSCluster cluster;
   private static DistributedFileSystem hdfs;
 
-  @BeforeClass
-  public static void setUp() throws Exception {
+  @Before
+  public void setUp() throws Exception {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
         .build();
     cluster.waitActive();
     hdfs = cluster.getFileSystem();
   }
 
-  @AfterClass
-  public static void tearDown() throws Exception {
+  @After
+  public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
@@ -279,4 +282,32 @@ public class TestNestedSnapshots {
       }
     }
   }
+
+  /**
+   * When we have nested snapshottable directories and we try to reset a
+   * snapshottable descendant back to a regular directory, we need to replace
+   * the snapshottable descendant with an INodeDirectoryWithSnapshot.
+   */
+  @Test
+  public void testDisallowNestedSnapshottableDir() throws Exception {
+    final Path dir = new Path("/dir");
+    final Path sub = new Path(dir, "sub");
+    hdfs.mkdirs(sub);
+
+    SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
+    final Path file = new Path(sub, "file");
+    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);
+
+    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
+    INode subNode = fsdir.getINode(sub.toString());
+    assertTrue(subNode instanceof INodeDirectoryWithSnapshot);
+
+    hdfs.allowSnapshot(sub);
+    subNode = fsdir.getINode(sub.toString());
+    assertTrue(subNode instanceof INodeDirectorySnapshottable);
+
+    hdfs.disallowSnapshot(sub);
+    subNode = fsdir.getINode(sub.toString());
+    assertTrue(subNode instanceof INodeDirectoryWithSnapshot);
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
index dce36039ee9..d06d9033fa4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
@@ -411,7 +411,6 @@ public class TestSnapshot {
    * the owner, and the other indicates the group
    */
   private String[] genRandomOwner() {
-    // TODO
     String[] userGroup = new String[]{"dr.who", "unknown"};
     return userGroup;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
index b03cb8b23f7..91f28ade185 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
@@ -24,7 +24,6 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
-import java.util.Arrays;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -46,9 +45,6 @@ import org.junit.Test;
  * Test cases for snapshot-related information in blocksMap.
  */
 public class TestSnapshotBlocksMap {
-  // TODO: fix concat for snapshot
-  private static final boolean runConcatTest = false;
-
   private static final long seed = 0;
   private static final short REPLICATION = 3;
   private static final int BLOCKSIZE = 1024;
@@ -208,36 +204,5 @@ public class TestSnapshotBlocksMap {
     } catch (IOException e) {
       assertExceptionContains("File does not exist: " + s1f0, e);
     }
-
-    // concat file1, file3 and file5 to file4
-    if (runConcatTest) {
-      final INodeFile f1 = assertBlockCollection(file1.toString(), 2, fsdir,
-          blockmanager);
-      final BlockInfo[] f1blocks = f1.getBlocks();
-      final INodeFile f3 = assertBlockCollection(file3.toString(), 5, fsdir,
-          blockmanager);
-      final BlockInfo[] f3blocks = f3.getBlocks();
-      final INodeFile f5 = assertBlockCollection(file5.toString(), 7, fsdir,
-          blockmanager);
-      final BlockInfo[] f5blocks = f5.getBlocks();
-      assertBlockCollection(file4.toString(), 1, fsdir, blockmanager);
-
-      hdfs.concat(file4, new Path[]{file1, file3, file5});
-
-      final INodeFile f4 = assertBlockCollection(file4.toString(), 15, fsdir,
-          blockmanager);
-      final BlockInfo[] blocks4 = f4.getBlocks();
-      for(BlockInfo[] blocks : Arrays.asList(f1blocks, f3blocks, blocks4, f5blocks)) {
-        for(BlockInfo b : blocks) {
-          assertBlockCollection(blockmanager, f4, b);
-        }
-      }
-      assertAllNull(f1, file1, snapshots);
-      assertAllNull(f3, file3, snapshots);
-      assertAllNull(f5, file5, snapshots);
-    }
   }
-
-  // TODO: test for deletion file which was appended after taking snapshots
-
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
index 0498c51cc8d..13428e5983e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
@@ -213,7 +213,6 @@ public class TestSnapshotReplication {
     checkFileReplication(file1, REPLICATION, REPLICATION);
     checkSnapshotFileReplication(file1, snapshotRepMap, REPLICATION);
 
-    // TODO: check replication after deleting snapshot(s)
    // Delete file1
    hdfs.delete(file1, true);
    // Check replication of snapshots
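
For context, HDFS-4735 is triggered by allowing and then disallowing snapshots on a directory that already sits under a snapshotted ancestor: resetting the nested directory legitimately replaces it with an INodeDirectoryWithSnapshot again, which the Preconditions check removed above rejected with an IllegalStateException. The sketch below is a minimal, illustrative harness for that sequence, reusing the same test helpers the new test depends on; the standalone class Hdfs4735Repro is hypothetical and not part of this patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;

/** Hypothetical repro harness for HDFS-4735; mirrors testDisallowNestedSnapshottableDir. */
public class Hdfs4735Repro {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DistributedFileSystem hdfs = cluster.getFileSystem();
      Path dir = new Path("/dir");
      Path sub = new Path(dir, "sub");
      hdfs.mkdirs(sub);

      // Snapshot the ancestor, then modify the subtree so that /dir/sub is
      // recorded in the ancestor's snapshot diff as an INodeDirectoryWithSnapshot.
      SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
      DFSTestUtil.createFile(hdfs, new Path(sub, "file"), 1024L, (short) 1, 0L);

      // Nest a snapshottable directory under the snapshotted ancestor, then
      // reset it. Before this patch, disallowSnapshot tripped the (now removed)
      // Preconditions check and threw IllegalStateException.
      hdfs.allowSnapshot(sub);
      hdfs.disallowSnapshot(sub);
    } finally {
      cluster.shutdown();
    }
  }
}

The same sequence is exercised by the new JUnit test; the point is simply that disallowSnapshot must tolerate landing back on an INodeDirectoryWithSnapshot whenever ancestor snapshots still cover the subtree.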