HDFS-5982. Need to update snapshot manager when applying editlog for deleting a snapshottable directory. Contributed by Jing Zhao.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1570395 13f79535-47bb-0310-9956-ffa450edef68
parent 7568e2f193
commit 4da6de1ca3
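For orientation, a minimal standalone model (illustrative Java, not HDFS source) of the invariant this patch restores: the SnapshotManager's registry of snapshottable directories must only reference directories that still exist in the namespace. A delete that bypasses the registry, which is what edit-log replay did before this fix, breaks that invariant:

    import java.util.HashSet;
    import java.util.Set;

    public class RegistryInvariantDemo {
      static Set<String> namespace = new HashSet<>();      // stand-in for the inode tree
      static Set<String> snapshottables = new HashSet<>(); // stand-in for SnapshotManager's list

      public static void main(String[] args) {
        namespace.add("/foo/bar1");
        namespace.add("/foo/bar2");
        snapshottables.add("/foo/bar1"); // hdfs.allowSnapshot(bar1)
        snapshottables.add("/foo/bar2"); // hdfs.allowSnapshot(bar2)

        // Buggy replay of OP_DELETE /foo: the subtree leaves the namespace,
        // but nothing tells the snapshot manager.
        namespace.removeIf(p -> p.startsWith("/foo"));

        // Invariant check: every registered directory must still exist.
        System.out.println("registry consistent? "
            + namespace.containsAll(snapshottables)); // false -> the bug
      }
    }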
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -543,6 +543,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-5944. LeaseManager:findLeaseWithPrefixPath can't handle path like /a/b/
     and cause SecondaryNameNode failed do checkpoint (Yunjiong Zhao via brandonli)
 
+    HDFS-5982. Need to update snapshot manager when applying editlog for deleting
+    a snapshottable directory. (jing9)
+
   BREAKDOWN OF HDFS-5698 SUBTASKS AND RELATED JIRAS
 
     HDFS-5717. Save FSImage header in protobuf. (Haohui Mai via jing9)
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -1317,20 +1317,12 @@ public class FSDirectory implements Closeable {
       if (!deleteAllowed(inodesInPath, src) ) {
         filesRemoved = -1;
       } else {
-        // Before removing the node, first check if the targetNode is for a
-        // snapshottable dir with snapshots, or its descendants have
-        // snapshottable dir with snapshots
-        final INode targetNode = inodesInPath.getLastINode();
         List<INodeDirectorySnapshottable> snapshottableDirs =
             new ArrayList<INodeDirectorySnapshottable>();
-        checkSnapshot(targetNode, snapshottableDirs);
+        checkSnapshot(inodesInPath.getLastINode(), snapshottableDirs);
         filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks,
             removedINodes, now);
-        if (snapshottableDirs.size() > 0) {
-          // There are some snapshottable directories without snapshots to be
-          // deleted. Need to update the SnapshotManager.
-          namesystem.removeSnapshottableDirs(snapshottableDirs);
-        }
+        namesystem.removeSnapshottableDirs(snapshottableDirs);
       }
     } finally {
       writeUnlock();
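The checkSnapshot call retained above does double duty: it aborts the delete when any snapshottable directory in the subtree still holds snapshots, and it collects the snapshot-free snapshottable directories so the caller can unregister them afterwards. A simplified, self-contained sketch of that contract (the Dir type and method body are illustrative stand-ins, not the real INode classes):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    public class CheckSnapshotSketch {
      // Illustrative stand-in for the INode subtree, not the real HDFS classes.
      static class Dir {
        final String path;
        final boolean snapshottable;
        final int numSnapshots;
        final List<Dir> children = new ArrayList<>();
        Dir(String path, boolean snapshottable, int numSnapshots) {
          this.path = path;
          this.snapshottable = snapshottable;
          this.numSnapshots = numSnapshots;
        }
      }

      // Mirrors the contract the delete path relies on: throw if any
      // snapshottable directory in the subtree still has snapshots, otherwise
      // collect the snapshot-free ones for unregistration after the delete.
      static void checkSnapshot(Dir target, List<Dir> snapshottableDirs)
          throws IOException {
        if (target.snapshottable) {
          if (target.numSnapshots > 0) {
            throw new IOException("The directory " + target.path
                + " cannot be deleted since it is snapshottable and has snapshots");
          }
          snapshottableDirs.add(target);
        }
        for (Dir child : target.children) {
          checkSnapshot(child, snapshottableDirs);
        }
      }

      public static void main(String[] args) throws IOException {
        Dir foo = new Dir("/foo", false, 0);
        foo.children.add(new Dir("/foo/bar1", true, 0)); // snapshottable, no snapshots
        foo.children.add(new Dir("/foo/bar2", true, 0));
        List<Dir> toUnregister = new ArrayList<>();
        checkSnapshot(foo, toUnregister); // succeeds, collects bar1 and bar2
        System.out.println("dirs to unregister: " + toUnregister.size()); // 2
      }
    }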
@@ -1392,18 +1384,25 @@ public class FSDirectory implements Closeable {
    * @param src a string representation of a path to an inode
    * @param mtime the time the inode is removed
    * @throws SnapshotAccessControlException if path is in RO snapshot
    */
   void unprotectedDelete(String src, long mtime) throws UnresolvedLinkException,
-      QuotaExceededException, SnapshotAccessControlException {
+      QuotaExceededException, SnapshotAccessControlException, IOException {
     assert hasWriteLock();
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
     List<INode> removedINodes = new ChunkedArrayList<INode>();
 
     final INodesInPath inodesInPath = rootDir.getINodesInPath4Write(
         normalizePath(src), false);
-    final long filesRemoved = deleteAllowed(inodesInPath, src) ?
-        unprotectedDelete(inodesInPath, collectedBlocks,
-            removedINodes, mtime) : -1;
+    long filesRemoved = -1;
+    if (deleteAllowed(inodesInPath, src)) {
+      List<INodeDirectorySnapshottable> snapshottableDirs =
+          new ArrayList<INodeDirectorySnapshottable>();
+      checkSnapshot(inodesInPath.getLastINode(), snapshottableDirs);
+      filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks,
+          removedINodes, mtime);
+      namesystem.removeSnapshottableDirs(snapshottableDirs);
+    }
+
     if (filesRemoved >= 0) {
       getFSNamesystem().removePathAndBlocks(src, collectedBlocks,
           removedINodes);
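The same cleanup has to exist in this overload because FSDirectory has two delete entry points: client requests go through delete(), while edit-log replay at NameNode startup applies a logged delete through unprotectedDelete(String, long); the real dispatch lives in FSEditLogLoader and varies by version. A minimal runnable model of that replay path, with illustrative names only:

    import java.util.ArrayList;
    import java.util.List;

    public class ReplayDispatchDemo {
      static class DeleteOp { // stand-in for the logged OP_DELETE record
        final String path;
        final long timestamp;
        DeleteOp(String path, long timestamp) {
          this.path = path;
          this.timestamp = timestamp;
        }
      }

      // Stand-in for SnapshotManager's registry.
      static List<String> snapshottables = new ArrayList<>();

      public static void main(String[] args) {
        snapshottables.add("/foo/bar1");
        snapshottables.add("/foo/bar2");
        // Startup replay never calls delete(); it applies the logged op:
        applyEditLogOp(new DeleteOp("/foo", 0L));
        System.out.println("registered after replay: "
            + snapshottables.size()); // 0 with the patched overload
      }

      static void applyEditLogOp(DeleteOp op) {
        unprotectedDelete(op.path, op.timestamp);
      }

      static void unprotectedDelete(String src, long mtime) {
        // ... subtree removal elided ...
        // The patched overload now mirrors delete(): it also unregisters the
        // snapshot-free snapshottable directories under the deleted subtree.
        snapshottables.removeIf(p -> p.equals(src) || p.startsWith(src + "/"));
      }
    }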
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -128,7 +129,42 @@ public class TestSnapshotDeletion {
     exception.expectMessage(error);
     hdfs.delete(sub, true);
   }
+
+  /**
+   * Test applying editlog of operation which deletes a snapshottable directory
+   * without snapshots. The snapshottable dir list in snapshot manager should be
+   * updated.
+   */
+  @Test (timeout=300000)
+  public void testApplyEditLogForDeletion() throws Exception {
+    final Path foo = new Path("/foo");
+    final Path bar1 = new Path(foo, "bar1");
+    final Path bar2 = new Path(foo, "bar2");
+    hdfs.mkdirs(bar1);
+    hdfs.mkdirs(bar2);
+
+    // allow snapshots on bar1 and bar2
+    hdfs.allowSnapshot(bar1);
+    hdfs.allowSnapshot(bar2);
+    assertEquals(2, cluster.getNamesystem().getSnapshotManager()
+        .getNumSnapshottableDirs());
+    assertEquals(2, cluster.getNamesystem().getSnapshotManager()
+        .getSnapshottableDirs().length);
+
+    // delete /foo
+    hdfs.delete(foo, true);
+    cluster.restartNameNode(0);
+    // the snapshottable dir list in snapshot manager should be empty
+    assertEquals(0, cluster.getNamesystem().getSnapshotManager()
+        .getNumSnapshottableDirs());
+    assertEquals(0, cluster.getNamesystem().getSnapshotManager()
+        .getSnapshottableDirs().length);
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.saveNamespace();
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    cluster.restartNameNode(0);
+  }
 
   /**
    * Deleting directory with snapshottable descendant with snapshots must fail.
    */
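The test asserts against SnapshotManager internals; from a client the same invariant is observable through DistributedFileSystem#getSnapshottableDirListing(). A hedged sketch, assuming a running cluster reachable via the default configuration, where after deleting /foo and restarting the NameNode the listing should no longer include bar1 or bar2:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;

    public class ListSnapshottableDirs {
      public static void main(String[] args) throws Exception {
        // Assumes fs.defaultFS points at the target HDFS instance.
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(new Configuration());
        // May return null when no snapshottable directories exist.
        SnapshottableDirectoryStatus[] dirs = dfs.getSnapshottableDirListing();
        if (dirs == null) {
          System.out.println("no snapshottable directories");
          return;
        }
        for (SnapshottableDirectoryStatus s : dirs) {
          System.out.println(s.getFullPath());
        }
      }
    }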