HDFS-4877. Merging change r1490421 from trunk.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1490430 13f79535-47bb-0310-9956-ffa450edef68
parent 09c12434a2
commit 3b08699b2c
@@ -838,6 +838,9 @@ Release 2.1.0-beta - UNRELEASED
     HDFS-4850. Fix OfflineImageViewer to work on fsimages with empty files or
     snapshots. (jing9)

+    HDFS-4877. Snapshot: fix the scenario where a directory is renamed under
+    its prior descendant. (jing9)
+
 Release 2.0.5-alpha - UNRELEASED

   INCOMPATIBLE CHANGES
@@ -678,7 +678,7 @@ public class FSDirectory implements Closeable {
           Quota.Counts.newInstance(), false, Snapshot.INVALID_ID);
       newSrcCounts.subtract(oldSrcCounts);
       srcParent.addSpaceConsumed(newSrcCounts.get(Quota.NAMESPACE),
-          newSrcCounts.get(Quota.DISKSPACE), false, Snapshot.INVALID_ID);
+          newSrcCounts.get(Quota.DISKSPACE), false);
     }

     return true;
@@ -944,8 +944,8 @@ public class FSDirectory implements Closeable {
       BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
       List<INode> removedINodes = new ArrayList<INode>();
       filesDeleted = removedDst.cleanSubtree(null,
-          dstIIP.getLatestSnapshot(), collectedBlocks, removedINodes).get(
-          Quota.NAMESPACE);
+          dstIIP.getLatestSnapshot(), collectedBlocks, removedINodes, true)
+          .get(Quota.NAMESPACE);
       getFSNamesystem().removePathAndBlocks(src, collectedBlocks,
           removedINodes);
     }
@@ -963,7 +963,7 @@ public class FSDirectory implements Closeable {
           Quota.Counts.newInstance(), false, Snapshot.INVALID_ID);
       newSrcCounts.subtract(oldSrcCounts);
       srcParent.addSpaceConsumed(newSrcCounts.get(Quota.NAMESPACE),
-          newSrcCounts.get(Quota.DISKSPACE), false, Snapshot.INVALID_ID);
+          newSrcCounts.get(Quota.DISKSPACE), false);
     }

     return filesDeleted >= 0;
@@ -1408,9 +1408,9 @@ public class FSDirectory implements Closeable {
       targetNode.destroyAndCollectBlocks(collectedBlocks, removedINodes);
     } else {
       Quota.Counts counts = targetNode.cleanSubtree(null, latestSnapshot,
-          collectedBlocks, removedINodes);
+          collectedBlocks, removedINodes, true);
       parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
-          -counts.get(Quota.DISKSPACE), true, Snapshot.INVALID_ID);
+          -counts.get(Quota.DISKSPACE), true);
       removed = counts.get(Quota.NAMESPACE);
     }
     if (NameNode.stateChangeLog.isDebugEnabled()) {
@@ -1797,7 +1797,8 @@ public class FSDirectory implements Closeable {
     final INode[] inodes = inodesInPath.getINodes();
     for(int i=0; i < numOfINodes; i++) {
       if (inodes[i].isQuotaSet()) { // a directory with quota
-        INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i];
+        INodeDirectoryWithQuota node = (INodeDirectoryWithQuota) inodes[i]
+            .asDirectory();
         node.addSpaceConsumed2Cache(nsDelta, dsDelta);
       }
     }
@@ -2033,7 +2034,8 @@ public class FSDirectory implements Closeable {
       }
       if (inodes[i].isQuotaSet()) { // a directory with quota
         try {
-          ((INodeDirectoryWithQuota)inodes[i]).verifyQuota(nsDelta, dsDelta);
+          ((INodeDirectoryWithQuota) inodes[i].asDirectory()).verifyQuota(
+              nsDelta, dsDelta);
         } catch (QuotaExceededException e) {
           e.setPathName(getFullPathName(inodes, i));
           throw e;
@@ -342,12 +342,13 @@ public abstract class INode implements Diff.Element<byte[]> {
    * deletion/update will be added to the given map.
    * @param removedINodes
    *          INodes collected from the descents for further cleaning up of
    *          inodeMap
    * @return quota usage delta when deleting a snapshot
    */
   public abstract Quota.Counts cleanSubtree(final Snapshot snapshot,
       Snapshot prior, BlocksMapUpdateInfo collectedBlocks,
-      List<INode> removedINodes) throws QuotaExceededException;
+      List<INode> removedINodes, boolean countDiffChange)
+      throws QuotaExceededException;

   /**
    * Destroy self and clear everything! If the INode is a file, this method
@@ -388,17 +389,10 @@ public abstract class INode implements Diff.Element<byte[]> {
    * Check and add namespace/diskspace consumed to itself and the ancestors.
    * @throws QuotaExceededException if quote is violated.
    */
-  public void addSpaceConsumed(long nsDelta, long dsDelta, boolean verify,
-      int snapshotId) throws QuotaExceededException {
+  public void addSpaceConsumed(long nsDelta, long dsDelta, boolean verify)
+      throws QuotaExceededException {
     if (parent != null) {
-      parent.addSpaceConsumed(nsDelta, dsDelta, verify, snapshotId);
-    }
-  }
-
-  public void addSpaceConsumedToRenameSrc(long nsDelta, long dsDelta,
-      boolean verify, int snapshotId) throws QuotaExceededException {
-    if (parent != null) {
-      parent.addSpaceConsumedToRenameSrc(nsDelta, dsDelta, verify, snapshotId);
+      parent.addSpaceConsumed(nsDelta, dsDelta, verify);
     }
   }

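The change that drives most of this patch is visible in the two INode hunks above: cleanSubtree(..) gains a countDiffChange flag, and the snapshotId / rename-src variants of the quota-propagation helpers are removed. Roughly, the flag says whether the snapshot diff removed during the cleanup should be included in the quota delta that the call returns. The following is a simplified, self-contained sketch of that idea in plain Java; the class and field names are illustrative and are not the HDFS types.

    // Simplified model of the countDiffChange idea; not the HDFS implementation.
    import java.util.ArrayList;
    import java.util.List;

    class QuotaCounts {
      long namespace;  // inodes/diffs charged against the namespace quota
      long diskspace;  // bytes charged against the diskspace quota
    }

    class Node {
      final List<Node> children = new ArrayList<Node>();
      int diffCount = 1;  // pretend each node currently holds one snapshot diff

      /**
       * Remove this node's snapshot diff and recurse into the children. When
       * countDiffChange is false (the subtree is reached through a rename
       * reference), the freed diff is left out of the returned delta so it is
       * not charged back to the caller a second time.
       */
      QuotaCounts cleanSubtree(boolean countDiffChange) {
        QuotaCounts counts = new QuotaCounts();
        if (diffCount > 0) {
          diffCount--;
          if (countDiffChange) {
            counts.namespace += 1;  // the removed diff frees one namespace unit
          }
        }
        for (Node child : children) {
          QuotaCounts c = child.cleanSubtree(countDiffChange);
          counts.namespace += c.namespace;
          counts.diskspace += c.diskspace;
        }
        return counts;
      }
    }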
@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;

 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
@@ -499,7 +500,8 @@ public class INodeDirectory extends INodeWithAdditionalFields {
   /** Call cleanSubtree(..) recursively down the subtree. */
   public Quota.Counts cleanSubtreeRecursively(final Snapshot snapshot,
       Snapshot prior, final BlocksMapUpdateInfo collectedBlocks,
-      final List<INode> removedINodes) throws QuotaExceededException {
+      final List<INode> removedINodes, final Map<INode, INode> excludedNodes,
+      final boolean countDiffChange) throws QuotaExceededException {
     Quota.Counts counts = Quota.Counts.newInstance();
     // in case of deletion snapshot, since this call happens after we modify
     // the diff list, the snapshot to be deleted has been combined or renamed
@@ -508,9 +510,14 @@ public class INodeDirectory extends INodeWithAdditionalFields {
     // INodeDirectoryWithSnapshot#cleanSubtree)
     Snapshot s = snapshot != null && prior != null ? prior : snapshot;
     for (INode child : getChildrenList(s)) {
-      Quota.Counts childCounts = child.cleanSubtree(snapshot, prior,
-          collectedBlocks, removedINodes);
-      counts.add(childCounts);
+      if (snapshot != null && excludedNodes != null
+          && excludedNodes.containsKey(child)) {
+        continue;
+      } else {
+        Quota.Counts childCounts = child.cleanSubtree(snapshot, prior,
+            collectedBlocks, removedINodes, countDiffChange);
+        counts.add(childCounts);
+      }
     }
     return counts;
   }
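The new excludedNodes parameter of cleanSubtreeRecursively is a HashMap used purely as a membership set: children already listed in a prior diff's deleted list are looked up and skipped so they are not cleaned twice. A minimal sketch of that lookup pattern, with illustrative generic names rather than the INode types, might look like this.

    // Minimal sketch of a map used as an exclusion set; names are illustrative.
    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    final class ExclusionExample {
      /** Build the exclusion map from a list, one entry per node. */
      static <T> Map<T, T> asExclusionMap(List<T> nodes) {
        Map<T, T> map = new HashMap<T, T>(nodes.size());
        for (T n : nodes) {
          map.put(n, n);
        }
        return map;
      }

      /** Collect every child that is not present in the exclusion map. */
      static <T> List<T> visitChildren(List<T> children, Map<T, T> excluded) {
        List<T> visited = new ArrayList<T>();
        for (T child : children) {
          if (excluded != null && excluded.containsKey(child)) {
            continue;  // already handled elsewhere, e.g. via a prior diff
          }
          visited.add(child);
        }
        return visited;
      }
    }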
@@ -527,8 +534,9 @@ public class INodeDirectory extends INodeWithAdditionalFields {

   @Override
   public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
       final BlocksMapUpdateInfo collectedBlocks,
-      final List<INode> removedINodes) throws QuotaExceededException {
+      final List<INode> removedINodes, final boolean countDiffChange)
+      throws QuotaExceededException {
     if (prior == null && snapshot == null) {
       // destroy the whole subtree and collect blocks that should be deleted
       Quota.Counts counts = Quota.Counts.newInstance();
@@ -538,7 +546,7 @@ public class INodeDirectory extends INodeWithAdditionalFields {
     } else {
       // process recursively down the subtree
       Quota.Counts counts = cleanSubtreeRecursively(snapshot, prior,
-          collectedBlocks, removedINodes);
+          collectedBlocks, removedINodes, null, countDiffChange);
       if (isQuotaSet()) {
         ((INodeDirectoryWithQuota) this).addSpaceConsumed2Cache(
             -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
@@ -132,7 +132,7 @@ public class INodeDirectoryWithQuota extends INodeDirectory {

   @Override
   public final void addSpaceConsumed(final long nsDelta, final long dsDelta,
-      boolean verify, int snapshotId) throws QuotaExceededException {
+      boolean verify) throws QuotaExceededException {
     if (isQuotaSet()) {
       // The following steps are important:
       // check quotas in this inode and all ancestors before changing counts
@@ -143,11 +143,11 @@ public class INodeDirectoryWithQuota extends INodeDirectory {
         verifyQuota(nsDelta, dsDelta);
       }
       // (2) verify quota and then add count in ancestors
-      super.addSpaceConsumed(nsDelta, dsDelta, verify, snapshotId);
+      super.addSpaceConsumed(nsDelta, dsDelta, verify);
       // (3) add count in this inode
       addSpaceConsumed2Cache(nsDelta, dsDelta);
     } else {
-      super.addSpaceConsumed(nsDelta, dsDelta, verify, snapshotId);
+      super.addSpaceConsumed(nsDelta, dsDelta, verify);
     }
   }

@@ -286,7 +286,8 @@ public class INodeFile extends INodeWithAdditionalFields implements BlockCollect

   @Override
   public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
-      final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
+      final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes, final boolean countDiffChange)
       throws QuotaExceededException {
     Quota.Counts counts = Quota.Counts.newInstance();
     if (snapshot == null && prior == null) {
@@ -113,8 +113,8 @@ public class INodeMap {

     @Override
     public Counts cleanSubtree(Snapshot snapshot, Snapshot prior,
-        BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes)
-        throws QuotaExceededException {
+        BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes,
+        boolean countDiffChange) throws QuotaExceededException {
       return null;
     }
   };
@@ -254,10 +254,10 @@ public abstract class INodeReference extends INode {

   @Override // used by WithCount
   public Quota.Counts cleanSubtree(Snapshot snapshot, Snapshot prior,
-      BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
-      throws QuotaExceededException {
+      BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes,
+      final boolean countDiffChange) throws QuotaExceededException {
     return referred.cleanSubtree(snapshot, prior, collectedBlocks,
-        removedINodes);
+        removedINodes, countDiffChange);
   }

   @Override // used by WithCount
@@ -396,29 +396,6 @@ public abstract class INodeReference extends INode {
         return withNameList.get(-i - 2);
       }
     }
-
-    @Override
-    public final void addSpaceConsumed(long nsDelta, long dsDelta,
-        boolean verify, int snapshotId) throws QuotaExceededException {
-      INodeReference parentRef = getParentReference();
-      if (parentRef != null) {
-        parentRef.addSpaceConsumed(nsDelta, dsDelta, verify, snapshotId);
-      }
-      addSpaceConsumedToRenameSrc(nsDelta, dsDelta, verify, snapshotId);
-    }
-
-    @Override
-    public final void addSpaceConsumedToRenameSrc(long nsDelta, long dsDelta,
-        boolean verify, int snapshotId) throws QuotaExceededException {
-      if (snapshotId != Snapshot.INVALID_ID) {
-        for (INodeReference.WithName withName : withNameList) {
-          if (withName.getLastSnapshotId() >= snapshotId) {
-            withName.addSpaceConsumed(nsDelta, dsDelta, verify, snapshotId);
-            break;
-          }
-        }
-      }
-    }
   }

   /** A reference with a fixed name. */
@@ -476,15 +453,20 @@ public abstract class INodeReference extends INode {
       Preconditions.checkState(this.lastSnapshotId >= lastSnapshotId);
       final INode referred = this.getReferredINode().asReference()
           .getReferredINode();
-      // we cannot use cache for the referred node since its cached quota may
-      // have already been updated by changes in the current tree
-      return referred.computeQuotaUsage(counts, false, this.lastSnapshotId);
+      // We will continue the quota usage computation using the same snapshot id
+      // as time line (if the given snapshot id is valid). Also, we cannot use
+      // cache for the referred node since its cached quota may have already
+      // been updated by changes in the current tree.
+      int id = lastSnapshotId > Snapshot.INVALID_ID ?
+          lastSnapshotId : this.lastSnapshotId;
+      return referred.computeQuotaUsage(counts, false, id);
     }

     @Override
     public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
         final BlocksMapUpdateInfo collectedBlocks,
-        final List<INode> removedINodes) throws QuotaExceededException {
+        final List<INode> removedINodes, final boolean countDiffChange)
+        throws QuotaExceededException {
       // since WithName node resides in deleted list acting as a snapshot copy,
       // the parameter snapshot must be non-null
       Preconditions.checkArgument(snapshot != null);
@@ -500,11 +482,19 @@ public abstract class INodeReference extends INode {
       }

       Quota.Counts counts = getReferredINode().cleanSubtree(snapshot, prior,
-          collectedBlocks, removedINodes);
+          collectedBlocks, removedINodes, false);
       INodeReference ref = getReferredINode().getParentReference();
       if (ref != null) {
         ref.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
-            -counts.get(Quota.DISKSPACE), true, Snapshot.INVALID_ID);
+            -counts.get(Quota.DISKSPACE), true);
+      }
+
+      if (snapshot.getId() < lastSnapshotId) {
+        // for a WithName node, when we compute its quota usage, we only count
+        // in all the nodes existing at the time of the corresponding rename op.
+        // Thus if we are deleting a snapshot before/at the snapshot associated
+        // with lastSnapshotId, we do not need to update the quota upwards.
+        counts = Quota.Counts.newInstance();
       }
       return counts;
     }
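The WithName changes above encode one rule: a rename-source (WithName) reference only accounts for inodes that existed when the rename happened, identified by lastSnapshotId, so quota computation keeps using that snapshot id and quota freed by deleting an older snapshot is not pushed up into the current tree. A small standalone sketch of the decision, with an illustrative class name and field:

    // Sketch of the WithName quota rule; not the HDFS classes.
    final class RenameSourceRef {
      /** id of the last snapshot taken before the rename (illustrative). */
      final int lastSnapshotId;

      RenameSourceRef(int lastSnapshotId) {
        this.lastSnapshotId = lastSnapshotId;
      }

      /**
       * Quota freed by deleting snapshot {@code snapshotId}: if that snapshot
       * is strictly older than the rename point, this reference never counted
       * it, so nothing is propagated upwards.
       */
      long quotaDeltaToPropagate(int snapshotId, long freedNamespace) {
        return snapshotId < lastSnapshotId ? 0L : freedNamespace;
      }
    }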
@@ -529,16 +519,17 @@ public abstract class INodeReference extends INode {
         // 1. create snapshot s1 on /test
         // 2. rename /test/foo/bar to /test/foo2/bar
         // 3. create snapshot s2 on /test
-        // 4. delete snapshot s2
+        // 4. rename foo2 again
+        // 5. delete snapshot s2
         return;
       }
       try {
         Quota.Counts counts = referred.cleanSubtree(snapshot, prior,
-            collectedBlocks, removedINodes);
+            collectedBlocks, removedINodes, false);
         INodeReference ref = getReferredINode().getParentReference();
         if (ref != null) {
           ref.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
-              -counts.get(Quota.DISKSPACE), true, Snapshot.INVALID_ID);
+              -counts.get(Quota.DISKSPACE), true);
         }
       } catch (QuotaExceededException e) {
         LOG.error("should not exceed quota while snapshot deletion", e);
@@ -588,8 +579,8 @@ public abstract class INodeReference extends INode {

     @Override
     public Quota.Counts cleanSubtree(Snapshot snapshot, Snapshot prior,
-        BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes)
-        throws QuotaExceededException {
+        BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes,
+        final boolean countDiffChange) throws QuotaExceededException {
       if (snapshot == null && prior == null) {
         Quota.Counts counts = Quota.Counts.newInstance();
         this.computeQuotaUsage(counts, true);
@@ -609,7 +600,7 @@ public abstract class INodeReference extends INode {
         return Quota.Counts.newInstance();
       }
       return getReferredINode().cleanSubtree(snapshot, prior,
-          collectedBlocks, removedINodes);
+          collectedBlocks, removedINodes, countDiffChange);
     }
   }

@@ -648,8 +639,11 @@ public abstract class INodeReference extends INode {
         sfile.deleteCurrentFile();
         if (snapshot != null) {
           try {
+            // when calling cleanSubtree of the referred node, since we
+            // compute quota usage updates before calling this destroy
+            // function, we use true for countDiffChange
             referred.cleanSubtree(snapshot, prior, collectedBlocks,
-                removedINodes);
+                removedINodes, true);
           } catch (QuotaExceededException e) {
             LOG.error("should not exceed quota while snapshot deletion", e);
           }
@@ -76,7 +76,8 @@ public class INodeSymlink extends INodeWithAdditionalFields {

   @Override
   public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
-      final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
+      final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes, final boolean countDiffChange) {
     if (snapshot == null && prior == null) {
       destroyAndCollectBlocks(collectedBlocks, removedINodes);
     }
@@ -68,7 +68,8 @@ abstract class AbstractINodeDiffList<N extends INode,
    */
   public final Quota.Counts deleteSnapshotDiff(final Snapshot snapshot,
       Snapshot prior, final N currentINode,
-      final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
+      final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes, boolean countDiffChange)
       throws QuotaExceededException {
     int snapshotIndex = Collections.binarySearch(diffs, snapshot.getId());

@@ -80,14 +81,14 @@ abstract class AbstractINodeDiffList<N extends INode,
         diffs.get(snapshotIndex).setSnapshot(prior);
       } else {
         removed = diffs.remove(0);
-        counts.add(Quota.NAMESPACE, 1);
-        // We add 1 to the namespace quota usage since we delete a diff.
-        // The quota change will be propagated to
-        // 1) ancestors in the current tree, and
-        // 2) src tree of any renamed ancestor.
-        // Because for 2) we do not calculate the number of diff for quota
-        // usage, we need to compensate this diff change for 2)
-        currentINode.addSpaceConsumedToRenameSrc(1, 0, false, snapshot.getId());
+        if (countDiffChange) {
+          counts.add(Quota.NAMESPACE, 1);
+        } else {
+          // the currentINode must be a descendant of a WithName node, which set
+          // countDiffChange to false. In that case we should count in the diff
+          // change when updating the quota usage in the current tree
+          currentINode.addSpaceConsumed(-1, 0, false);
+        }
         counts.add(removed.destroyDiffAndCollectBlocks(currentINode,
             collectedBlocks, removedINodes));
       }
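With the countDiffChange flag, deleting one snapshot diff is accounted for in one of two ways: either the freed namespace unit is reported in the returned counts (and the caller subtracts it from its cached usage), or it is applied directly to the current tree via addSpaceConsumed(-1, 0, false) and left out of the return value. The tiny model below shows that both paths end with the same cached usage; the names are illustrative, not the HDFS classes.

    // Tiny model of the two accounting paths; illustrative only.
    final class DiffAccounting {
      long cachedNamespaceUsage;  // usage cached on the directory's quota node

      DiffAccounting(long initialUsage) {
        this.cachedNamespaceUsage = initialUsage;
      }

      /** Delete one diff; returns the delta the caller should subtract. */
      long deleteOneDiff(boolean countDiffChange) {
        if (countDiffChange) {
          return 1L;                  // caller subtracts the freed diff itself
        }
        cachedNamespaceUsage -= 1L;   // apply the -1 here, report nothing
        return 0L;
      }

      /** Caller-side update, mirroring addSpaceConsumed2Cache(-delta, ...). */
      void applyReportedDelta(long delta) {
        cachedNamespaceUsage -= delta;
      }
    }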
@@ -98,8 +99,11 @@ abstract class AbstractINodeDiffList<N extends INode,
       } else {
         // combine the to-be-removed diff with its previous diff
         removed = diffs.remove(snapshotIndex);
-        counts.add(Quota.NAMESPACE, 1);
-        currentINode.addSpaceConsumedToRenameSrc(1, 0, false, snapshot.getId());
+        if (countDiffChange) {
+          counts.add(Quota.NAMESPACE, 1);
+        } else {
+          currentINode.addSpaceConsumed(-1, 0, false);
+        }
         if (previous.snapshotINode == null) {
           previous.snapshotINode = removed.snapshotINode;
         } else if (removed.snapshotINode != null) {
@@ -117,7 +121,7 @@ abstract class AbstractINodeDiffList<N extends INode,
   /** Add an {@link AbstractINodeDiff} for the given snapshot. */
   final D addDiff(Snapshot latest, N currentINode)
       throws QuotaExceededException {
-    currentINode.addSpaceConsumed(1, 0, true, Snapshot.INVALID_ID);
+    currentINode.addSpaceConsumed(1, 0, true);
     return addLast(createDiff(latest, currentINode));
   }

@@ -319,21 +319,23 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
           + " from path " + this.getFullPathName()
           + ": the snapshot does not exist.");
     } else {
-      final Snapshot snapshot = snapshotsByNames.remove(i);
+      final Snapshot snapshot = snapshotsByNames.get(i);
       Snapshot prior = Snapshot.findLatestSnapshot(this, snapshot);
       try {
         Quota.Counts counts = cleanSubtree(snapshot, prior, collectedBlocks,
-            removedINodes);
+            removedINodes, true);
         INodeDirectory parent = getParent();
         if (parent != null) {
           // there will not be any WithName node corresponding to the deleted
           // snapshot, thus only update the quota usage in the current tree
           parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
-              -counts.get(Quota.DISKSPACE), true, Snapshot.INVALID_ID);
+              -counts.get(Quota.DISKSPACE), true);
         }
       } catch(QuotaExceededException e) {
         LOG.error("BUG: removeSnapshot increases namespace usage.", e);
       }
+      // remove from snapshotsByNames after successfully cleaning the subtree
+      snapshotsByNames.remove(i);
       return snapshot;
     }
   }
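The snapshot-deletion hunk above also reorders the bookkeeping: the Snapshot object is now looked up with get(i) and only dropped from snapshotsByNames after the subtree cleanup has completed, so the name list cannot get ahead of the actual cleanup. A generic sketch of that look-up-first, remove-after-work ordering; the types and names are illustrative:

    // Generic sketch of "remove the registry entry only after the work is done".
    import java.util.ArrayList;
    import java.util.List;

    final class Registry<T> {
      interface Cleanup<E> {
        void run(E entry) throws Exception;
      }

      private final List<T> entries = new ArrayList<T>();

      void add(T entry) {
        entries.add(entry);
      }

      /** Look the entry up first, run the cleanup, then drop the entry. */
      T removeAfterCleanup(int index, Cleanup<T> cleanup) throws Exception {
        T entry = entries.get(index);  // read it, do not remove yet
        cleanup.run(entry);            // the entry stays registered while this runs
        entries.remove(index);         // drop it only once the cleanup finished
        return entry;
      }
    }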
@@ -23,8 +23,10 @@ import java.util.ArrayDeque;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Deque;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;

 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
@@ -658,7 +660,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
       if (added && !removeDeletedChild) {
         final Quota.Counts counts = deletedChild.computeQuotaUsage();
         addSpaceConsumed(counts.get(Quota.NAMESPACE),
-            counts.get(Quota.DISKSPACE), false, Snapshot.INVALID_ID);
+            counts.get(Quota.DISKSPACE), false);
       }
     }

@@ -692,9 +694,12 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {

   @Override
   public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
-      final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
+      final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes, final boolean countDiffChange)
       throws QuotaExceededException {
     Quota.Counts counts = Quota.Counts.newInstance();
+    Map<INode, INode> priorCreated = null;
+    Map<INode, INode> priorDeleted = null;
     if (snapshot == null) { // delete the current directory
       recordModification(prior, null);
       // delete everything in created list
@@ -706,8 +711,28 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
     } else {
       // update prior
       prior = getDiffs().updatePrior(snapshot, prior);
+      // if there is a snapshot diff associated with prior, we need to record
+      // its original created and deleted list before deleting post
+      if (prior != null) {
+        DirectoryDiff priorDiff = this.getDiffs().getDiff(prior);
+        if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
+          List<INode> cList = priorDiff.diff.getList(ListType.CREATED);
+          List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
+          priorCreated = new HashMap<INode, INode>(cList.size());
+          for (INode cNode : cList) {
+            priorCreated.put(cNode, cNode);
+          }
+          priorDeleted = new HashMap<INode, INode>(dList.size());
+          for (INode dNode : dList) {
+            priorDeleted.put(dNode, dNode);
+          }
+        }
+      }
+
       counts.add(getDiffs().deleteSnapshotDiff(snapshot, prior, this,
-          collectedBlocks, removedINodes));
+          collectedBlocks, removedINodes, countDiffChange));
+
+      // check priorDiff again since it may be created during the diff deletion
       if (prior != null) {
         DirectoryDiff priorDiff = this.getDiffs().getDiff(prior);
         if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
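Before the posterior diff is deleted, the prior diff's created and deleted child lists are copied into HashMaps (priorCreated, priorDeleted); the later passes consult these copies rather than the live lists, which deleteSnapshotDiff may have changed in the meantime. A small sketch of that capture-before-mutate pattern with generic, illustrative names:

    // Sketch: copy a mutable list into a lookup map first, mutate, then consult
    // the copy instead of the (possibly changed) live list. Illustrative names.
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    final class CaptureBeforeMutate {
      static <T> Map<T, T> capture(List<T> live) {
        Map<T, T> copy = new HashMap<T, T>(live.size());
        for (T item : live) {
          copy.put(item, item);
        }
        return copy;
      }

      /** Count only items that were already present before the mutation ran. */
      static <T> int countOriginalOnly(List<T> live, Map<T, T> capturedBefore) {
        int n = 0;
        for (T item : live) {
          if (capturedBefore != null && capturedBefore.containsKey(item)) {
            n++;  // present both before and after the mutation
          }
        }
        return n;
      }
    }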
@@ -716,11 +741,17 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
           // use null as prior in the cleanSubtree call. Files/directories that
           // were created before "prior" will be covered by the later
           // cleanSubtreeRecursively call.
-          for (INode cNode : priorDiff.getChildrenDiff().getList(
-              ListType.CREATED)) {
-            counts.add(cNode.cleanSubtree(snapshot, null, collectedBlocks,
-                removedINodes));
+          if (priorCreated != null) {
+            // we only check the node originally in prior's created list
+            for (INode cNode : priorDiff.getChildrenDiff().getList(
+                ListType.CREATED)) {
+              if (priorCreated.containsKey(cNode)) {
+                counts.add(cNode.cleanSubtree(snapshot, null, collectedBlocks,
+                    removedINodes, countDiffChange));
+              }
+            }
           }

           // When a directory is moved from the deleted list of the posterior
           // diff to the deleted list of this diff, we need to destroy its
           // descendants that were 1) created after taking this diff and 2)
@@ -728,16 +759,19 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {

           // For files moved from posterior's deleted list, we also need to
           // delete its snapshot copy associated with the posterior snapshot.
+
           for (INode dNode : priorDiff.getChildrenDiff().getList(
               ListType.DELETED)) {
-            counts.add(cleanDeletedINode(dNode, snapshot, prior,
-                collectedBlocks, removedINodes));
+            if (priorDeleted == null || !priorDeleted.containsKey(dNode)) {
+              counts.add(cleanDeletedINode(dNode, snapshot, prior,
+                  collectedBlocks, removedINodes, countDiffChange));
+            }
           }
         }
       }
     }
     counts.add(cleanSubtreeRecursively(snapshot, prior, collectedBlocks,
-        removedINodes));
+        removedINodes, priorDeleted, countDiffChange));

     if (isQuotaSet()) {
       this.addSpaceConsumed2Cache(-counts.get(Quota.NAMESPACE),
@@ -755,9 +789,11 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
    * @param collectedBlocks Used to collect blocks for later deletion.
    * @return Quota usage update.
    */
-  private static Quota.Counts cleanDeletedINode(INode inode, final Snapshot post,
-      final Snapshot prior, final BlocksMapUpdateInfo collectedBlocks,
-      final List<INode> removedINodes) throws QuotaExceededException {
+  private static Quota.Counts cleanDeletedINode(INode inode,
+      final Snapshot post, final Snapshot prior,
+      final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes, final boolean countDiffChange)
+      throws QuotaExceededException {
     Quota.Counts counts = Quota.Counts.newInstance();
     Deque<INode> queue = new ArrayDeque<INode>();
     queue.addLast(inode);
@@ -766,7 +802,8 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
       if (topNode instanceof INodeReference.WithName) {
        INodeReference.WithName wn = (INodeReference.WithName) topNode;
        if (wn.getLastSnapshotId() >= post.getId()) {
-          wn.cleanSubtree(post, prior, collectedBlocks, removedINodes);
+          wn.cleanSubtree(post, prior, collectedBlocks, removedINodes,
+              countDiffChange);
        }
        // For DstReference node, since the node is not in the created list of
        // prior, we should treat it as regular file/dir
@@ -774,20 +811,28 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
          && topNode.asFile() instanceof FileWithSnapshot) {
        FileWithSnapshot fs = (FileWithSnapshot) topNode.asFile();
        counts.add(fs.getDiffs().deleteSnapshotDiff(post, prior,
-            topNode.asFile(), collectedBlocks, removedINodes));
+            topNode.asFile(), collectedBlocks, removedINodes, countDiffChange));
       } else if (topNode.isDirectory()) {
         INodeDirectory dir = topNode.asDirectory();
+        ChildrenDiff priorChildrenDiff = null;
         if (dir instanceof INodeDirectoryWithSnapshot) {
           // delete files/dirs created after prior. Note that these
           // files/dirs, along with inode, were deleted right after post.
           INodeDirectoryWithSnapshot sdir = (INodeDirectoryWithSnapshot) dir;
           DirectoryDiff priorDiff = sdir.getDiffs().getDiff(prior);
           if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
-            counts.add(priorDiff.diff.destroyCreatedList(sdir,
+            priorChildrenDiff = priorDiff.getChildrenDiff();
+            counts.add(priorChildrenDiff.destroyCreatedList(sdir,
                 collectedBlocks, removedINodes));
           }
         }
+
         for (INode child : dir.getChildrenList(prior)) {
+          if (priorChildrenDiff != null
+              && priorChildrenDiff.search(ListType.DELETED,
+                  child.getLocalNameBytes()) != null) {
+            continue;
+          }
           queue.addLast(child);
         }
       }
@@ -864,29 +909,39 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
       if (inode instanceof INodeReference.WithName && snapshot != null) {
         // this inode has been renamed before the deletion of the DstReference
         // subtree
-        inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes);
+        inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes,
+            true);
       } else {
         // for DstReference node, continue this process to its subtree
         destroyDstSubtree(inode.asReference().getReferredINode(), snapshot,
             prior, collectedBlocks, removedINodes);
       }
     } else if (inode.isFile() && snapshot != null) {
-      inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes);
+      inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes, true);
     } else if (inode.isDirectory()) {
+      Map<INode, INode> excludedNodes = null;
       if (inode instanceof INodeDirectoryWithSnapshot) {
         INodeDirectoryWithSnapshot sdir = (INodeDirectoryWithSnapshot) inode;
         DirectoryDiffList diffList = sdir.getDiffs();
         if (snapshot != null) {
           diffList.deleteSnapshotDiff(snapshot, prior, sdir, collectedBlocks,
-              removedINodes);
+              removedINodes, true);
         }
         DirectoryDiff priorDiff = diffList.getDiff(prior);
         if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
           priorDiff.diff.destroyCreatedList(sdir, collectedBlocks,
               removedINodes);
+          List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
+          excludedNodes = new HashMap<INode, INode>(dList.size());
+          for (INode dNode : dList) {
+            excludedNodes.put(dNode, dNode);
+          }
         }
       }
       for (INode child : inode.asDirectory().getChildrenList(prior)) {
+        if (excludedNodes != null && excludedNodes.containsKey(child)) {
+          continue;
+        }
         destroyDstSubtree(child, snapshot, prior, collectedBlocks,
             removedINodes);
       }
@@ -104,7 +104,8 @@ public class INodeFileUnderConstructionWithSnapshot

   @Override
   public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
-      final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
+      final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes, final boolean countDiffChange)
       throws QuotaExceededException {
     if (snapshot == null) { // delete the current file
       recordModification(prior, null);
@@ -114,7 +115,7 @@ public class INodeFileUnderConstructionWithSnapshot
     } else { // delete a snapshot
       prior = getDiffs().updatePrior(snapshot, prior);
       return diffs.deleteSnapshotDiff(snapshot, prior, this, collectedBlocks,
-          removedINodes);
+          removedINodes, countDiffChange);
     }
   }

@@ -91,7 +91,8 @@ public class INodeFileWithSnapshot extends INodeFile

   @Override
   public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
-      final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
+      final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes, final boolean countDiffChange)
       throws QuotaExceededException {
     if (snapshot == null) { // delete the current file
       recordModification(prior, null);
@@ -101,7 +102,7 @@ public class INodeFileWithSnapshot extends INodeFile
     } else { // delete a snapshot
       prior = getDiffs().updatePrior(snapshot, prior);
       return diffs.deleteSnapshotDiff(snapshot, prior, this, collectedBlocks,
-          removedINodes);
+          removedINodes, countDiffChange);
     }
   }

@@ -454,7 +454,7 @@ public class TestRenameWithSnapshots {

     // delete snapshot s5. The diff of s5 should be combined to s4
     hdfs.deleteSnapshot(sdir1, "s5");
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
     assertFalse(hdfs.exists(bar2_s5));
     final Path bar2_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4",
         "foo/bar2");
@@ -492,7 +492,7 @@ public class TestRenameWithSnapshots {
     assertFalse(hdfs.exists(bar3_s2));

     // restart the cluster and check fsimage
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);

     // delete snapshot s2.
     hdfs.deleteSnapshot(sdir2, "s2");
@@ -500,14 +500,15 @@ public class TestRenameWithSnapshots {
     assertFalse(hdfs.exists(bar2_s2));

     // restart the cluster and check fsimage
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
     hdfs.deleteSnapshot(sdir1, "s3");
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
     hdfs.deleteSnapshot(sdir1, "s1");
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
   }

-  private void restartClusterAndCheckImage() throws IOException {
+  private void restartClusterAndCheckImage(boolean compareQuota)
+      throws IOException {
     File fsnBefore = new File(testDir, "dumptree_before");
     File fsnMiddle = new File(testDir, "dumptree_middle");
     File fsnAfter = new File(testDir, "dumptree_after");
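restartClusterAndCheckImage now takes a compareQuota flag that is passed straight through to SnapshotTestHelper.compareDumpedTreeInFile (see the next hunk), so a test can restart the cluster and compare the dumped trees while choosing whether quota fields must match. A minimal sketch of that kind of helper; the method names and the "quota=" token are assumptions for illustration, not the HDFS test utilities:

    // Minimal sketch of threading a compareQuota flag through a dump comparison.
    final class DumpCompare {
      /** Compare two dumped trees, optionally ignoring quota fields. */
      static boolean dumpsMatch(String before, String after, boolean compareQuota) {
        String left = compareQuota ? before : stripQuota(before);
        String right = compareQuota ? after : stripQuota(after);
        return left.equals(right);
      }

      /** Drop a hypothetical "quota=..." token from every dumped line. */
      private static String stripQuota(String dump) {
        return dump.replaceAll("quota=\\S+", "");
      }
    }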
@ -538,8 +539,10 @@ public class TestRenameWithSnapshots {
|
||||||
// dump the namespace loaded from fsimage
|
// dump the namespace loaded from fsimage
|
||||||
SnapshotTestHelper.dumpTree2File(fsdir, fsnAfter);
|
SnapshotTestHelper.dumpTree2File(fsdir, fsnAfter);
|
||||||
|
|
||||||
SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnMiddle, true);
|
SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnMiddle,
|
||||||
SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnAfter, true);
|
compareQuota);
|
||||||
|
SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnAfter,
|
||||||
|
compareQuota);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -579,7 +582,7 @@ public class TestRenameWithSnapshots {
|
||||||
|
|
||||||
// delete snapshot s5.
|
// delete snapshot s5.
|
||||||
hdfs.deleteSnapshot(sdir1, "s5");
|
hdfs.deleteSnapshot(sdir1, "s5");
|
||||||
restartClusterAndCheckImage();
|
restartClusterAndCheckImage(true);
|
||||||
assertFalse(hdfs.exists(foo_s5));
|
assertFalse(hdfs.exists(foo_s5));
|
||||||
status = hdfs.getFileStatus(foo_s4);
|
status = hdfs.getFileStatus(foo_s4);
|
||||||
assertEquals(REPL_1, status.getReplication());
|
assertEquals(REPL_1, status.getReplication());
|
||||||
|
@ -604,18 +607,18 @@ public class TestRenameWithSnapshots {
|
||||||
.getLocalName());
|
.getLocalName());
|
||||||
|
|
||||||
// restart cluster
|
// restart cluster
|
||||||
restartClusterAndCheckImage();
|
restartClusterAndCheckImage(true);
|
||||||
|
|
||||||
// delete snapshot s2.
|
// delete snapshot s2.
|
||||||
hdfs.deleteSnapshot(sdir2, "s2");
|
hdfs.deleteSnapshot(sdir2, "s2");
|
||||||
assertFalse(hdfs.exists(foo_s2));
|
assertFalse(hdfs.exists(foo_s2));
|
||||||
|
|
||||||
// restart the cluster and check fsimage
|
// restart the cluster and check fsimage
|
||||||
restartClusterAndCheckImage();
|
restartClusterAndCheckImage(true);
|
||||||
hdfs.deleteSnapshot(sdir1, "s3");
|
hdfs.deleteSnapshot(sdir1, "s3");
|
||||||
restartClusterAndCheckImage();
|
restartClusterAndCheckImage(true);
|
||||||
hdfs.deleteSnapshot(sdir1, "s1");
|
hdfs.deleteSnapshot(sdir1, "s1");
|
||||||
restartClusterAndCheckImage();
|
restartClusterAndCheckImage(true);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -650,7 +653,7 @@ public class TestRenameWithSnapshots {
|
||||||
hdfs.rename(bar2_dir1, bar2_dir2);
|
hdfs.rename(bar2_dir1, bar2_dir2);
|
||||||
|
|
||||||
// restart the cluster and check fsimage
|
// restart the cluster and check fsimage
|
||||||
restartClusterAndCheckImage();
|
restartClusterAndCheckImage(true);
|
||||||
|
|
||||||
// modification on /dir2/foo and /dir2/bar
|
// modification on /dir2/foo and /dir2/bar
|
||||||
final Path bar1_dir2 = new Path(foo_dir2, "bar1");
|
final Path bar1_dir2 = new Path(foo_dir2, "bar1");
|
||||||
|
@ -686,7 +689,7 @@ public class TestRenameWithSnapshots {
|
||||||
hdfs.rename(bar2_dir2, bar2_dir3);
|
hdfs.rename(bar2_dir2, bar2_dir3);
|
||||||
|
|
||||||
// restart the cluster and check fsimage
|
// restart the cluster and check fsimage
|
||||||
restartClusterAndCheckImage();
|
restartClusterAndCheckImage(true);
|
||||||
|
|
||||||
// modification on /dir3/foo and /dir3/bar
|
// modification on /dir3/foo and /dir3/bar
|
||||||
final Path bar1_dir3 = new Path(foo_dir3, "bar1");
|
final Path bar1_dir3 = new Path(foo_dir3, "bar1");
|
||||||
|
@ -718,7 +721,7 @@ public class TestRenameWithSnapshots {
|
||||||
hdfs.rename(bar2_dir3, bar2_dir2);
|
hdfs.rename(bar2_dir3, bar2_dir2);
|
||||||
|
|
||||||
// restart the cluster and check fsimage
|
// restart the cluster and check fsimage
|
||||||
restartClusterAndCheckImage();
|
restartClusterAndCheckImage(true);
|
||||||
|
|
||||||
// modification on /dir2/foo
|
// modification on /dir2/foo
|
||||||
hdfs.setReplication(bar1_dir2, REPL);
|
hdfs.setReplication(bar1_dir2, REPL);
|
||||||
|
@ -773,14 +776,14 @@ public class TestRenameWithSnapshots {
|
||||||
.getLocalName());
|
.getLocalName());
|
||||||
|
|
||||||
// restart the cluster and check fsimage
|
// restart the cluster and check fsimage
|
||||||
restartClusterAndCheckImage();
|
restartClusterAndCheckImage(true);
|
||||||
|
|
||||||
// delete foo
|
// delete foo
|
||||||
hdfs.delete(foo_dir1, true);
|
hdfs.delete(foo_dir1, true);
|
||||||
hdfs.delete(bar2_dir1, true);
|
hdfs.delete(bar2_dir1, true);
|
||||||
|
|
||||||
// restart the cluster and check fsimage
|
// restart the cluster and check fsimage
|
||||||
restartClusterAndCheckImage();
|
restartClusterAndCheckImage(true);
|
||||||
|
|
||||||
// check
|
// check
|
||||||
assertTrue(hdfs.exists(bar1_s1));
|
assertTrue(hdfs.exists(bar1_s1));
|
||||||
|
@ -844,7 +847,7 @@ public class TestRenameWithSnapshots {
|
||||||
hdfs.setReplication(bar_dir2, REPL_1);
|
hdfs.setReplication(bar_dir2, REPL_1);
|
||||||
|
|
||||||
// restart the cluster and check fsimage
|
// restart the cluster and check fsimage
|
||||||
restartClusterAndCheckImage();
|
restartClusterAndCheckImage(true);
|
||||||
|
|
||||||
// create snapshots
|
// create snapshots
|
||||||
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s11");
|
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s11");
|
||||||
|
@ -863,7 +866,7 @@ public class TestRenameWithSnapshots {
|
||||||
hdfs.setReplication(bar_dir3, REPL_2);
|
hdfs.setReplication(bar_dir3, REPL_2);
|
||||||
|
|
||||||
// restart the cluster and check fsimage
|
// restart the cluster and check fsimage
|
||||||
restartClusterAndCheckImage();
|
restartClusterAndCheckImage(true);
|
||||||
|
|
||||||
// create snapshots
|
// create snapshots
|
||||||
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s111");
|
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s111");
|
||||||
|
@ -917,7 +920,7 @@ public class TestRenameWithSnapshots {
|
||||||
hdfs.setReplication(bar_dir2, REPL);
|
hdfs.setReplication(bar_dir2, REPL);
|
||||||
|
|
||||||
// restart the cluster and check fsimage
|
// restart the cluster and check fsimage
|
||||||
restartClusterAndCheckImage();
|
restartClusterAndCheckImage(true);
|
||||||
|
|
||||||
// create snapshots
|
// create snapshots
|
||||||
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1111");
|
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1111");
|
||||||
|
@ -999,14 +1002,14 @@ public class TestRenameWithSnapshots {
|
||||||
assertEquals("s1", barDiffs.get(0).snapshot.getRoot().getLocalName());
|
assertEquals("s1", barDiffs.get(0).snapshot.getRoot().getLocalName());
|
||||||
|
|
||||||
// restart the cluster and check fsimage
|
// restart the cluster and check fsimage
|
||||||
restartClusterAndCheckImage();
|
restartClusterAndCheckImage(true);
|
||||||
|
|
||||||
// delete foo
|
// delete foo
|
||||||
hdfs.delete(foo_dir1, true);
|
hdfs.delete(foo_dir1, true);
|
||||||
hdfs.delete(bar_dir1, true);
|
hdfs.delete(bar_dir1, true);
|
||||||
|
|
||||||
// restart the cluster and check fsimage
|
// restart the cluster and check fsimage
|
||||||
restartClusterAndCheckImage();
|
restartClusterAndCheckImage(true);
|
||||||
|
|
||||||
// check
|
// check
|
||||||
final Path bar1_s1111 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1111",
|
final Path bar1_s1111 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1111",
|
||||||
|
@@ -1127,7 +1130,7 @@ public class TestRenameWithSnapshots {
     hdfs.rename(foo, newfoo);
 
     // restart the cluster and check fsimage
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
 
     final Path bar2 = new Path(newfoo, "bar2");
     DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
@@ -1145,7 +1148,7 @@ public class TestRenameWithSnapshots {
     // delete snapshot s4. The diff of s4 should be combined to s3
     hdfs.deleteSnapshot(sdir1, "s4");
     // restart the cluster and check fsimage
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
 
     Path bar_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo/bar");
     assertFalse(hdfs.exists(bar_s3));
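
The hunk above touches the case where deleting snapshot s4 folds its diff into the prior snapshot s3, so nothing recorded between the two is lost. Below is a small, hypothetical client-side sketch (not part of this patch) of how that combined diff can be observed; it assumes a DistributedFileSystem handle, made-up snapshot names, and that an empty "to" snapshot name means the current tree.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;

class SnapshotDiffCombineSketch {
  // Prints the diff before and after deleting the newer snapshot; the changes
  // recorded under s4 should still show up relative to s3 afterwards.
  static void show(DistributedFileSystem hdfs, Path dir) throws IOException {
    SnapshotDiffReport before = hdfs.getSnapshotDiffReport(dir, "s3", "s4");
    System.out.println("diff(s3 -> s4):\n" + before);

    hdfs.deleteSnapshot(dir, "s4");

    // An empty snapshot name refers to the current state of the directory.
    SnapshotDiffReport after = hdfs.getSnapshotDiffReport(dir, "s3", "");
    System.out.println("diff(s3 -> current) after deleting s4:\n" + after);
  }
}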
@@ -1175,18 +1178,18 @@ public class TestRenameWithSnapshots {
     assertEquals("s2", diffs.get(0).snapshot.getRoot().getLocalName());
 
     // restart the cluster and check fsimage
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
 
     // delete snapshot s2.
     hdfs.deleteSnapshot(sdir2, "s2");
     assertFalse(hdfs.exists(bar_s2));
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
     // make sure the whole referred subtree has been destroyed
     assertEquals(4, fsdir.getRoot().getNamespace());
     assertEquals(0, fsdir.getRoot().getDiskspace());
 
     hdfs.deleteSnapshot(sdir1, "s1");
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
     assertEquals(3, fsdir.getRoot().getNamespace());
     assertEquals(0, fsdir.getRoot().getDiskspace());
   }
@@ -1232,7 +1235,7 @@ public class TestRenameWithSnapshots {
     INode fooNode = fooRef.asFile();
     assertTrue(fooNode instanceof INodeFileWithSnapshot);
 
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
   }
 
   /**
@@ -1724,7 +1727,7 @@ public class TestRenameWithSnapshots {
     cluster = new MiniDFSCluster.Builder(conf).format(false)
         .numDataNodes(REPL).build();
     cluster.waitActive();
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
   }
 
   /**
@@ -1784,7 +1787,108 @@ public class TestRenameWithSnapshots {
     final Path foo2 = new Path(bar2, "foo");
     hdfs.rename(foo, foo2);
 
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
+
+    // delete snap1
+    hdfs.deleteSnapshot(sdir1, snap1);
+
+    restartClusterAndCheckImage(true);
+  }
+
+  /**
+   * move a directory to its prior descedant
+   */
+  @Test
+  public void testRename2PreDescendant_2() throws Exception {
+    final Path root = new Path("/");
+    final Path sdir1 = new Path("/dir1");
+    final Path sdir2 = new Path("/dir2");
+    final Path foo = new Path(sdir1, "foo");
+    final Path bar = new Path(foo, "bar");
+    final Path file1InBar = new Path(bar, "file1");
+    final Path file2InBar = new Path(bar, "file2");
+    hdfs.mkdirs(bar);
+    hdfs.mkdirs(sdir2);
+    DFSTestUtil.createFile(hdfs, file1InBar, BLOCKSIZE, REPL, SEED);
+    DFSTestUtil.createFile(hdfs, file2InBar, BLOCKSIZE, REPL, SEED);
+
+    hdfs.setQuota(sdir1, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+    hdfs.setQuota(sdir2, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+    hdfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+    hdfs.setQuota(bar, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+
+    // create snapshot on root
+    SnapshotTestHelper.createSnapshot(hdfs, root, snap1);
+    // delete file1InBar
+    hdfs.delete(file1InBar, true);
+
+    // create another snapshot on root
+    SnapshotTestHelper.createSnapshot(hdfs, root, snap2);
+    // delete file2InBar
+    hdfs.delete(file2InBar, true);
+
+    // /dir1/foo/bar -> /dir2/bar
+    final Path bar2 = new Path(sdir2, "bar2");
+    hdfs.rename(bar, bar2);
+
+    // /dir1/foo -> /dir2/bar/foo
+    final Path foo2 = new Path(bar2, "foo2");
+    hdfs.rename(foo, foo2);
+
+    restartClusterAndCheckImage(true);
+
+    // delete snapshot snap2
+    hdfs.deleteSnapshot(root, snap2);
+
+    // after deleteing snap2, the WithName node "bar", which originally was
+    // stored in the deleted list of "foo" for snap2, is moved to its deleted
+    // list for snap1. In that case, it will not be counted when calculating
+    // quota for "foo". However, we do not update this quota usage change while
+    // deleting snap2.
+    restartClusterAndCheckImage(false);
+  }
+
+  /**
+   * move a directory to its prior descedant
+   */
+  @Test
+  public void testRename2PreDescendant_3() throws Exception {
+    final Path root = new Path("/");
+    final Path sdir1 = new Path("/dir1");
+    final Path sdir2 = new Path("/dir2");
+    final Path foo = new Path(sdir1, "foo");
+    final Path bar = new Path(foo, "bar");
+    final Path fileInBar = new Path(bar, "file");
+    hdfs.mkdirs(bar);
+    hdfs.mkdirs(sdir2);
+    DFSTestUtil.createFile(hdfs, fileInBar, BLOCKSIZE, REPL, SEED);
+
+    hdfs.setQuota(sdir1, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+    hdfs.setQuota(sdir2, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+    hdfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+    hdfs.setQuota(bar, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+
+    // create snapshot on root
+    SnapshotTestHelper.createSnapshot(hdfs, root, snap1);
+    // delete fileInBar
+    hdfs.delete(fileInBar, true);
+    // create another snapshot on root
+    SnapshotTestHelper.createSnapshot(hdfs, root, snap2);
+
+    // /dir1/foo/bar -> /dir2/bar
+    final Path bar2 = new Path(sdir2, "bar2");
+    hdfs.rename(bar, bar2);
+
+    // /dir1/foo -> /dir2/bar/foo
+    final Path foo2 = new Path(bar2, "foo2");
+    hdfs.rename(foo, foo2);
+
+    restartClusterAndCheckImage(true);
+
+    // delete snapshot snap1
+    hdfs.deleteSnapshot(root, snap1);
+
+    restartClusterAndCheckImage(true);
   }
 
   /**
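
The two new tests added above, testRename2PreDescendant_2 and testRename2PreDescendant_3, exercise the HDFS-4877 scenario directly: with snapshots present, a directory is moved away and its former parent is then renamed underneath it. Stripped of the MiniDFSCluster harness, the sequence looks roughly like the sketch below; it assumes a running HDFS whose caller is a superuser, and the paths and snapshot names are illustrative only. The restartClusterAndCheckImage calls in the tests are what then verify that the resulting namespace, including the reference bookkeeping for the renamed directories, still survives a save and reload of the fsimage.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RenameUnderPriorDescendantSketch {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at a running HDFS and that the caller has
    // superuser privilege (needed for allowSnapshot on the root).
    DistributedFileSystem hdfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    Path root = new Path("/");
    Path bar = new Path("/dir1/foo/bar");

    hdfs.mkdirs(bar);
    hdfs.mkdirs(new Path("/dir2"));
    hdfs.allowSnapshot(root);

    // Take a snapshot so the later renames have to keep references back to
    // the snapshotted layout.
    hdfs.createSnapshot(root, "snap1");

    // /dir1/foo/bar -> /dir2/bar2 : bar leaves its parent foo
    hdfs.rename(bar, new Path("/dir2/bar2"));

    // /dir1/foo -> /dir2/bar2/foo2 : foo now lives under its former child
    hdfs.rename(new Path("/dir1/foo"), new Path("/dir2/bar2/foo2"));

    // Removing the snapshot afterwards exercises the cleanup path as well.
    hdfs.deleteSnapshot(root, "snap1");
  }
}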
@@ -1851,7 +1955,7 @@ public class TestRenameWithSnapshots {
     assertEquals(0, diff.getList(ListType.CREATED).size());
     assertEquals(0, diff.getList(ListType.DELETED).size());
 
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
   }
 
   /**
@@ -1928,7 +2032,7 @@ public class TestRenameWithSnapshots {
     assertSame(wc, wc2);
     assertSame(fooRef2, wc.getParentReference());
 
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
   }
 
   /**
@@ -2041,7 +2145,7 @@ public class TestRenameWithSnapshots {
         .isEmpty());
     assertTrue(barNode.getChildrenList(null).isEmpty());
 
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
   }
 
   /**
@@ -2137,6 +2241,6 @@ public class TestRenameWithSnapshots {
     assertSame(fooNode.asReference().getReferredINode(),
         fooNode_s2.getReferredINode());
 
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
   }
 }