HDFS-6908. Incorrect snapshot directory diff generated by snapshot deletion. Contributed by Juan Yu and Jing Zhao.
(cherry picked from commit 6b441d227a)
parent 5c63482aaa
commit c8b254d70e
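For context, the bug can be triggered entirely through the public snapshot API; the standalone sketch below mirrors the scenario that the new tests in this patch exercise. It is illustrative only: the class name Hdfs6908Repro is invented here, and it assumes a running HDFS cluster reachable via fs.defaultFS, with superuser rights to call allowSnapshot.

// Minimal reproduction sketch for HDFS-6908 (illustrative, not part of the
// patch). Assumes a running, reachable HDFS cluster.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class Hdfs6908Repro {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    Path root = new Path("/");
    Path dir = new Path("/dir1");

    dfs.create(new Path(dir, "file1")).close();
    dfs.allowSnapshot(root);
    dfs.createSnapshot(root, "s1");   // s1 captures /dir1/file1

    dfs.create(new Path(dir, "file2")).close();
    dfs.createSnapshot(root, "s2");   // s2 captures file1 and file2

    dfs.delete(dir, true);            // remove the directory after s2

    // Deleting s2 forces its directory diff to be combined into s1's;
    // before this fix the combined diff could come out wrong, which
    // surfaced later (e.g. when saving and reloading the namespace).
    dfs.deleteSnapshot(root, "s2");
  }
}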
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -290,6 +290,9 @@ Release 2.6.0 - UNRELEASED
 
     HDFS-4852. libhdfs documentation is out of date. (cnauroth)
 
+    HDFS-6908. Incorrect snapshot directory diff generated by snapshot deletion.
+    (Juan Yu and jing9 via jing9)
+
 Release 2.5.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -720,6 +720,8 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
           counts.add(lastDiff.diff.destroyCreatedList(currentINode,
               collectedBlocks, removedINodes));
         }
+        counts.add(currentINode.cleanSubtreeRecursively(snapshot, prior,
+            collectedBlocks, removedINodes, priorDeleted, countDiffChange));
       } else {
         // update prior
         prior = getDiffs().updatePrior(snapshot, prior);
@@ -737,7 +739,9 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
 
         counts.add(getDiffs().deleteSnapshotDiff(snapshot, prior,
             currentINode, collectedBlocks, removedINodes, countDiffChange));
-
+        counts.add(currentINode.cleanSubtreeRecursively(snapshot, prior,
+            collectedBlocks, removedINodes, priorDeleted, countDiffChange));
+
         // check priorDiff again since it may be created during the diff deletion
         if (prior != Snapshot.NO_SNAPSHOT_ID) {
           DirectoryDiff priorDiff = this.getDiffs().getDiffById(prior);
@@ -776,9 +780,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
           }
         }
       }
-      counts.add(currentINode.cleanSubtreeRecursively(snapshot, prior,
-          collectedBlocks, removedINodes, priorDeleted, countDiffChange));
-
+
       if (currentINode.isQuotaSet()) {
         currentINode.getDirectoryWithQuotaFeature().addSpaceConsumed2Cache(
             -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
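Read together, the three hunks above relocate the recursive subtree cleanup: instead of one trailing call after the priorDiff re-check, cleanSubtreeRecursively now runs inside each branch, before that re-check. A comment-only paraphrase of the resulting flow (abbreviated by me; the CURRENT_STATE_ID condition comes from the surrounding method and is not visible in the hunks):

// Paraphrased post-patch control flow, not the literal method body:
//
//   if (snapshot == Snapshot.CURRENT_STATE_ID) {   // deleting current state
//     ... destroy the created list of the last diff ...
//     cleanSubtreeRecursively(...);                // moved into this branch
//   } else {                                       // deleting a snapshot
//     prior = getDiffs().updatePrior(snapshot, prior);
//     getDiffs().deleteSnapshotDiff(...);          // may create a prior diff
//     cleanSubtreeRecursively(...);                // now BEFORE the re-check
//     // only afterwards is priorDiff re-read, since the diff deletion
//     // may have just created it
//   }
//   // the old single trailing cleanSubtreeRecursively call is gone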
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -559,7 +560,81 @@ public class TestSnapshotDeletion {
           + toDeleteFileInSnapshot.toString(), e);
     }
   }
 
+  /**
+   * Delete a snapshot that is taken before a directory deletion,
+   * directory diff list should be combined correctly.
+   */
+  @Test (timeout=60000)
+  public void testDeleteSnapshot1() throws Exception {
+    final Path root = new Path("/");
+
+    Path dir = new Path("/dir1");
+    Path file1 = new Path(dir, "file1");
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
+
+    hdfs.allowSnapshot(root);
+    hdfs.createSnapshot(root, "s1");
+
+    Path file2 = new Path(dir, "file2");
+    DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
+
+    hdfs.createSnapshot(root, "s2");
+
+    // delete file
+    hdfs.delete(file1, true);
+    hdfs.delete(file2, true);
+
+    // delete directory
+    assertTrue(hdfs.delete(dir, false));
+
+    // delete second snapshot
+    hdfs.deleteSnapshot(root, "s2");
+
+    NameNodeAdapter.enterSafeMode(cluster.getNameNode(), false);
+    NameNodeAdapter.saveNamespace(cluster.getNameNode());
+
+    // restart NN
+    cluster.restartNameNodes();
+  }
+
+  /**
+   * Delete a snapshot that is taken before a directory deletion (recursively),
+   * directory diff list should be combined correctly.
+   */
+  @Test (timeout=60000)
+  public void testDeleteSnapshot2() throws Exception {
+    final Path root = new Path("/");
+
+    Path dir = new Path("/dir1");
+    Path file1 = new Path(dir, "file1");
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
+
+    hdfs.allowSnapshot(root);
+    hdfs.createSnapshot(root, "s1");
+
+    Path file2 = new Path(dir, "file2");
+    DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
+    INodeFile file2Node = fsdir.getINode(file2.toString()).asFile();
+    long file2NodeId = file2Node.getId();
+
+    hdfs.createSnapshot(root, "s2");
+
+    // delete directory recursively
+    assertTrue(hdfs.delete(dir, true));
+    assertNotNull(fsdir.getInode(file2NodeId));
+
+    // delete second snapshot
+    hdfs.deleteSnapshot(root, "s2");
+    assertTrue(fsdir.getInode(file2NodeId) == null);
+
+    NameNodeAdapter.enterSafeMode(cluster.getNameNode(), false);
+    NameNodeAdapter.saveNamespace(cluster.getNameNode());
+
+    // restart NN
+    cluster.restartNameNodes();
+  }
+
+  /**
+   * Test deleting snapshots in a more complicated scenario: need to combine
+   * snapshot diffs, but no need to handle diffs distributed in a dir tree