HDFS-5476. Snapshot: clean the blocks/files/directories under a renamed file/directory while deletion. Contributed by Jing Zhao.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1540142 13f79535-47bb-0310-9956-ffa450edef68
commit f2ad787a27
parent 97199baea1
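What the change fixes: after a rename across a snapshot, the destination of the rename is tracked through a DstReference node. Deleting that destination and then the snapshot covering it could leave blocks, files, and directories of the renamed subtree uncollected. A condensed restatement of the repro, lifted from the new testCleanDstReference test at the bottom of this commit (a sketch only: it assumes that test class's MiniDFSCluster harness, with hdfs as its DistributedFileSystem and the class's BLOCKSIZE/REPL/SEED constants):

    // Scenario fixed by this patch (condensed from testCleanDstReference below).
    hdfs.mkdirs(new Path("/test/foo/bar"));
    SnapshotTestHelper.createSnapshot(hdfs, new Path("/test"), "s0");
    // file created after s0, so it is not in s0
    DFSTestUtil.createFile(hdfs, new Path("/test/foo/bar/file"), BLOCKSIZE, REPL, SEED);
    hdfs.rename(new Path("/test/foo"), new Path("/test/foo2")); // foo2 is now a DstReference
    hdfs.createSnapshot(new Path("/test"), "s1");               // s1 includes the file
    hdfs.delete(new Path("/test/foo2"), true);
    hdfs.deleteSnapshot(new Path("/test"), "s1");
    // Before this patch, blocks and inodes under the renamed subtree
    // (/test/foo2/bar/file) could be left behind at this point.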
CHANGES.txt
@@ -508,6 +508,9 @@ Release 2.3.0 - UNRELEASED
     HDFS-5443. Delete 0-sized block when deleting an under-construction file that
     is included in snapshot. (jing9)
 
+    HDFS-5476. Snapshot: clean the blocks/files/directories under a renamed
+    file/directory while deletion. (jing9)
+
 Release 2.2.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
INodeReference.java
@@ -646,16 +646,14 @@ public abstract class INodeReference extends INode {
         FileWithSnapshot sfile = (FileWithSnapshot) referred;
         // make sure we mark the file as deleted
         sfile.deleteCurrentFile();
-        if (snapshot != null) {
-          try {
-            // when calling cleanSubtree of the referred node, since we
-            // compute quota usage updates before calling this destroy
-            // function, we use true for countDiffChange
-            referred.cleanSubtree(snapshot, prior, collectedBlocks,
-                removedINodes, true);
-          } catch (QuotaExceededException e) {
-            LOG.error("should not exceed quota while snapshot deletion", e);
-          }
+        try {
+          // when calling cleanSubtree of the referred node, since we
+          // compute quota usage updates before calling this destroy
+          // function, we use true for countDiffChange
+          referred.cleanSubtree(snapshot, prior, collectedBlocks,
+              removedINodes, true);
+        } catch (QuotaExceededException e) {
+          LOG.error("should not exceed quota while snapshot deletion", e);
         }
       } else if (referred instanceof INodeDirectoryWithSnapshot) {
         // similarly, if referred is a directory, it must be an
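The shape of this hunk is the core of the file-side fix: sfile.deleteCurrentFile() is unchanged, but the cleanSubtree call is no longer wrapped in "if (snapshot != null)". Here snapshot can be null when no snapshot was taken after the rename, and under the old code that skipped cleanSubtree entirely, so the referred file's blocks were never collected. The same call annotated (patch code, comments mine):

    sfile.deleteCurrentFile();   // mark the current file deleted first
    try {
      // Now unconditional: even with snapshot == null (destroying the
      // current file rather than a snapshot diff), cleanSubtree runs and
      // the file's blocks end up in collectedBlocks. The companion hunks
      // in the two INodeFile* classes below make this call safe to repeat.
      referred.cleanSubtree(snapshot, prior, collectedBlocks,
          removedINodes, true);
    } catch (QuotaExceededException e) {
      LOG.error("should not exceed quota while snapshot deletion", e);
    }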
INodeDirectoryWithSnapshot.java
@@ -716,14 +716,8 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
       if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
         List<INode> cList = priorDiff.diff.getList(ListType.CREATED);
         List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
-        priorCreated = new HashMap<INode, INode>(cList.size());
-        for (INode cNode : cList) {
-          priorCreated.put(cNode, cNode);
-        }
-        priorDeleted = new HashMap<INode, INode>(dList.size());
-        for (INode dNode : dList) {
-          priorDeleted.put(dNode, dNode);
-        }
+        priorCreated = cloneDiffList(cList);
+        priorDeleted = cloneDiffList(dList);
       }
     }
 
@@ -896,6 +890,17 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
     counts.add(Content.DIRECTORY, diffs.asList().size());
   }
 
+  private static Map<INode, INode> cloneDiffList(List<INode> diffList) {
+    if (diffList == null || diffList.size() == 0) {
+      return null;
+    }
+    Map<INode, INode> map = new HashMap<INode, INode>(diffList.size());
+    for (INode node : diffList) {
+      map.put(node, node);
+    }
+    return map;
+  }
+
   /**
    * Destroy a subtree under a DstReference node.
    */
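The new cloneDiffList helper replaces two hand-rolled copies of the same loop in the previous hunk and a third one in the next. It snapshots a created/deleted diff list into an identity map, each INode keyed by itself, so a later pass over a directory's children can ask "was this child already recorded in the prior diff?" in constant time by reference identity. A small illustration of the intended semantics (someChild is a hypothetical inode used only for this sketch):

    List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
    Map<INode, INode> priorDeleted = cloneDiffList(dList);
    // An empty or null diff list yields null rather than an empty map,
    // so null means "nothing to exclude".
    if (priorDeleted != null && priorDeleted.containsKey(someChild)) {
      // someChild is already covered by the prior snapshot's deleted list,
      // so the cleaning pass skips it.
    }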
@@ -914,26 +919,28 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
         destroyDstSubtree(inode.asReference().getReferredINode(), snapshot,
             prior, collectedBlocks, removedINodes);
       }
-    } else if (inode.isFile() && snapshot != null) {
+    } else if (inode.isFile()) {
       inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes, true);
     } else if (inode.isDirectory()) {
       Map<INode, INode> excludedNodes = null;
       if (inode instanceof INodeDirectoryWithSnapshot) {
         INodeDirectoryWithSnapshot sdir = (INodeDirectoryWithSnapshot) inode;
+
         DirectoryDiffList diffList = sdir.getDiffs();
+        DirectoryDiff priorDiff = diffList.getDiff(prior);
+        if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
+          List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
+          excludedNodes = cloneDiffList(dList);
+        }
+
         if (snapshot != null) {
           diffList.deleteSnapshotDiff(snapshot, prior, sdir, collectedBlocks,
               removedINodes, true);
         }
-        DirectoryDiff priorDiff = diffList.getDiff(prior);
+        priorDiff = diffList.getDiff(prior);
         if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
           priorDiff.diff.destroyCreatedList(sdir, collectedBlocks,
               removedINodes);
-          List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
-          excludedNodes = new HashMap<INode, INode>(dList.size());
-          for (INode dNode : dList) {
-            excludedNodes.put(dNode, dNode);
-          }
         }
       }
       for (INode child : inode.asDirectory().getChildrenList(prior)) {
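Two things change in destroyDstSubtree. First, the isFile branch drops its "snapshot != null" guard, mirroring the INodeReference change above. Second, the ordering in the directory branch: the prior diff's DELETED list is now captured into excludedNodes before deleteSnapshotDiff runs, since deleting the snapshot diff can restructure the diff list, and priorDiff is then looked up again rather than reused. The patched control flow, reduced to annotated steps (a condensed reading of the hunk above, comments mine, not separately runnable):

    // 1. capture the prior diff's DELETED list before anything mutates it
    excludedNodes = cloneDiffList(priorDiff.diff.getList(ListType.DELETED));
    // 2. then delete the snapshot diff, which may merge or drop diff entries
    diffList.deleteSnapshotDiff(snapshot, prior, sdir, collectedBlocks,
        removedINodes, true);
    // 3. re-fetch priorDiff: the pre-deletion reference may be stale
    priorDiff = diffList.getDiff(prior);
    // 4. only then destroy the CREATED list recorded in the prior diff
    priorDiff.diff.destroyCreatedList(sdir, collectedBlocks, removedINodes);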
INodeFileUnderConstructionWithSnapshot.java
@@ -109,8 +109,10 @@ public class INodeFileUnderConstructionWithSnapshot
       final List<INode> removedINodes, final boolean countDiffChange)
       throws QuotaExceededException {
     if (snapshot == null) { // delete the current file
-      recordModification(prior, null);
-      isCurrentFileDeleted = true;
+      if (!isCurrentFileDeleted()) {
+        recordModification(prior, null);
+        deleteCurrentFile();
+      }
       Util.collectBlocksAndClear(this, collectedBlocks, removedINodes);
       return Quota.Counts.newInstance();
     } else { // delete a snapshot
INodeFileWithSnapshot.java
@@ -96,8 +96,10 @@ public class INodeFileWithSnapshot extends INodeFile
       final List<INode> removedINodes, final boolean countDiffChange)
       throws QuotaExceededException {
     if (snapshot == null) { // delete the current file
-      recordModification(prior, null);
-      isCurrentFileDeleted = true;
+      if (!isCurrentFileDeleted()) {
+        recordModification(prior, null);
+        deleteCurrentFile();
+      }
       Util.collectBlocksAndClear(this, collectedBlocks, removedINodes);
       return Quota.Counts.newInstance();
     } else { // delete a snapshot
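These two sibling hunks are the same fix in both file-with-snapshot classes. cleanSubtree(null, ...) must now tolerate a file whose current state was already marked deleted, because DstReference.destroyAndCollectBlocks (first code hunk) calls deleteCurrentFile() before cleanSubtree, and destroyDstSubtree now reaches this path with a possibly-null snapshot as well. The direct field write is also replaced by the deleteCurrentFile() helper. Annotated (patch code, comments mine):

    if (snapshot == null) { // delete the current file
      if (!isCurrentFileDeleted()) {
        // record the modification only once; repeating it for the same
        // prior snapshot would corrupt the file's diff list
        recordModification(prior, null);
        deleteCurrentFile();
      }
      // blocks are collected regardless, which is what actually frees the
      // renamed-file and 0-sized blocks this issue is about
      Util.collectBlocksAndClear(this, collectedBlocks, removedINodes);
      return Quota.Counts.newInstance();
    }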
TestRenameWithSnapshots.java
@@ -2243,4 +2243,50 @@ public class TestRenameWithSnapshots {
 
     restartClusterAndCheckImage(true);
   }
+
+  /**
+   * Make sure we clean the whole subtree under a DstReference node after
+   * deleting a snapshot.
+   * see HDFS-5476.
+   */
+  @Test
+  public void testCleanDstReference() throws Exception {
+    final Path test = new Path("/test");
+    final Path foo = new Path(test, "foo");
+    final Path bar = new Path(foo, "bar");
+    hdfs.mkdirs(bar);
+    SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
+
+    // create file after s0 so that the file should not be included in s0
+    final Path fileInBar = new Path(bar, "file");
+    DFSTestUtil.createFile(hdfs, fileInBar, BLOCKSIZE, REPL, SEED);
+    // rename foo --> foo2
+    final Path foo2 = new Path(test, "foo2");
+    hdfs.rename(foo, foo2);
+    // create snapshot s1, note the file is included in s1
+    hdfs.createSnapshot(test, "s1");
+    // delete bar and foo2
+    hdfs.delete(new Path(foo2, "bar"), true);
+    hdfs.delete(foo2, true);
+
+    final Path sfileInBar = SnapshotTestHelper.getSnapshotPath(test, "s1",
+        "foo2/bar/file");
+    assertTrue(hdfs.exists(sfileInBar));
+
+    hdfs.deleteSnapshot(test, "s1");
+    assertFalse(hdfs.exists(sfileInBar));
+
+    restartClusterAndCheckImage(true);
+    // make sure the file under bar is deleted
+    final Path barInS0 = SnapshotTestHelper.getSnapshotPath(test, "s0",
+        "foo/bar");
+    INodeDirectoryWithSnapshot barNode = (INodeDirectoryWithSnapshot) fsdir
+        .getINode(barInS0.toString());
+    assertEquals(0, barNode.getChildrenList(null).size());
+    List<DirectoryDiff> diffList = barNode.getDiffs().asList();
+    assertEquals(1, diffList.size());
+    DirectoryDiff diff = diffList.get(0);
+    assertEquals(0, diff.getChildrenDiff().getList(ListType.DELETED).size());
+    assertEquals(0, diff.getChildrenDiff().getList(ListType.CREATED).size());
+  }
 }
TestSnapshotBlocksMap.java
@@ -347,4 +347,49 @@ public class TestSnapshotBlocksMap {
     assertEquals(1, blks.length);
     assertEquals(BLOCKSIZE, blks[0].getNumBytes());
   }
+
+  /**
+   * 1. rename under-construction file with 0-sized blocks after snapshot.
+   * 2. delete the renamed directory.
+   * make sure we delete the 0-sized block.
+   * see HDFS-5476.
+   */
+  @Test
+  public void testDeletionWithZeroSizeBlock3() throws Exception {
+    final Path foo = new Path("/foo");
+    final Path subDir = new Path(foo, "sub");
+    final Path bar = new Path(subDir, "bar");
+    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);
+
+    hdfs.append(bar);
+
+    INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
+    BlockInfo[] blks = barNode.getBlocks();
+    assertEquals(1, blks.length);
+    ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
+    cluster.getNameNodeRpc()
+        .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
+            null, barNode.getId(), null);
+
+    SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");
+
+    // rename bar
+    final Path bar2 = new Path(subDir, "bar2");
+    hdfs.rename(bar, bar2);
+
+    INodeFile bar2Node = fsdir.getINode4Write(bar2.toString()).asFile();
+    blks = bar2Node.getBlocks();
+    assertEquals(2, blks.length);
+    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
+    assertEquals(0, blks[1].getNumBytes());
+
+    // delete subDir
+    hdfs.delete(subDir, true);
+
+    final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
+    barNode = fsdir.getINode(sbar.toString()).asFile();
+    blks = barNode.getBlocks();
+    assertEquals(1, blks.length);
+    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
+  }
 }
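Both regression tests live in the hadoop-hdfs module. A typical way to run just these two classes (standard Maven/Surefire usage, not part of the commit):

    cd hadoop-hdfs-project/hadoop-hdfs
    mvn test -Dtest=TestRenameWithSnapshots,TestSnapshotBlocksMap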