HDFS-4441. Move INodeDirectoryWithSnapshot.Diff and the related classes to a package.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1438782 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2013-01-26 00:01:51 +00:00
parent 569d63d36c
commit a3bf208386
14 changed files with 649 additions and 567 deletions

View File

@ -124,3 +124,6 @@ Branch-2802 Snapshot (Unreleased)
HDFS-4429. When the latest snapshot exists, INodeFileUnderConstruction should
be replaced with INodeFileWithSnapshot but not INodeFile. (Jing Zhao
via szetszwo)
HDFS-4441. Move INodeDirectoryWithSnapshot.Diff and the related classes to a
package. (szetszwo)

View File

@ -1218,33 +1218,17 @@ public class FSDirectory implements Closeable {
}
}
private void unprotectedReplaceINode(String path, INode oldnode,
INode newnode, Snapshot latest) throws IOException {
void unprotectedReplaceINodeFile(final String path, final INodeFile oldnode,
final INodeFile newnode, final Snapshot latest) {
Preconditions.checkState(hasWriteLock());
INodeDirectory parent = oldnode.getParent();
// Remove the node from the namespace
if (parent == null) {
final String mess
= "FSDirectory.unprotectedReplaceINode: failed to remove " + path;
NameNode.stateChangeLog.warn("DIR* " + mess);
throw new IOException(mess);
}
final INodeDirectory parent = oldnode.getParent();
final INode removed = parent.removeChild(oldnode, latest);
Preconditions.checkState(removed == oldnode,
"removed != oldnode=%s, removed=%s", oldnode, removed);
parent = oldnode.getParent();
oldnode.setParent(null);
parent.addChild(newnode, true, latest);
}
void unprotectedReplaceINodeFile(String path, INodeFile oldnode,
INodeFile newnode, Snapshot latest
) throws IOException, UnresolvedLinkException {
unprotectedReplaceINode(path, oldnode, newnode, latest);
newnode.setLocalName(oldnode.getLocalNameBytes());
/* Currently oldnode and newnode are assumed to contain the same
* blocks. Otherwise, blocks need to be removed from the blocksMap.

View File

@ -3296,14 +3296,12 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
leaseManager.removeLease(pendingFile.getClientName(), src);
if (latestSnapshot != null) {
if (!(pendingFile instanceof INodeFileUnderConstructionWithSnapshot)) {
// replace INodeFileUnderConstruction with
// INodeFileUnderConstructionWithSnapshot. This replacement does not
// need to be recorded in snapshot.
if (pendingFile.getClass() == INodeFileUnderConstruction.class) {
// Replace it with INodeFileUnderConstructionWithSnapshot.
// This replacement does not need to be recorded in snapshot.
INodeFileUnderConstructionWithSnapshot pendingFileWithSnaphsot =
new INodeFileUnderConstructionWithSnapshot(pendingFile);
dir.replaceINodeFile(src, pendingFile,
pendingFileWithSnaphsot, null);
dir.replaceINodeFile(src, pendingFile, pendingFileWithSnaphsot, null);
pendingFile = pendingFileWithSnaphsot;
}
pendingFile = (INodeFileUnderConstruction) pendingFile

View File

@ -35,13 +35,12 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileUnderConstructionSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import org.apache.hadoop.util.StringUtils;
@ -55,7 +54,7 @@ import com.google.common.primitives.SignedBytes;
* directory inodes.
*/
@InterfaceAudience.Private
public abstract class INode implements Comparable<byte[]> {
public abstract class INode implements Diff.Element<byte[]> {
public static final Log LOG = LogFactory.getLog(INode.class);
static final ReadOnlyList<INode> EMPTY_READ_ONLY_LIST
@ -83,16 +82,6 @@ public abstract class INode implements Comparable<byte[]> {
}
}
/** A triple of objects. */
public static class Triple<L, M, R> extends Pair<L, R> {
public final M middle;
public Triple(L left, M middle, R right) {
super(left, right);
this.middle = middle;
}
}
/** Wrapper of two counters for namespace consumed and diskspace consumed. */
static class DirCounts {
/** namespace count */
@ -406,6 +395,11 @@ public abstract class INode implements Comparable<byte[]> {
return name;
}
@Override
public byte[] getKey() {
return getLocalNameBytes();
}
/**
* Set local file name
*/
@ -686,35 +680,9 @@ public abstract class INode implements Comparable<byte[]> {
out.print(getObjectString());
out.print("), parent=");
out.print(parent == null? null: parent.getLocalName() + "/");
out.print(", permission=" + getFsPermission(snapshot) + ", group="
+ getGroupName(snapshot) + ", user=" + getUserName(snapshot));
if (!this.isDirectory()) {
if (this.isFile()) {
// print block information
String blocksInfo = ((INodeFile) this).printBlocksInfo();
out.print(", blocks=[" + blocksInfo + "]");
}
if (this instanceof FileWithSnapshot) {
if (this instanceof INodeFileSnapshot
|| this instanceof INodeFileUnderConstructionSnapshot) {
out.print(", computedSize="
+ ((INodeFileSnapshot) this).computeFileSize(true));
}
FileWithSnapshot nodeWithLink = (FileWithSnapshot) this;
FileWithSnapshot next = nodeWithLink.getNext();
// An INodeFileWithSnapshot whose next link points to itself should be
// equivalent to a normal INodeFile
if (!(this instanceof INodeFileWithSnapshot &&
((INodeFileWithSnapshot) this).getNext() == this)) {
out.print(", next="
+ (next != null ? next.asINodeFile().getObjectString() : "null"));
}
}
out.println();
} else {
final INodeDirectory dir = (INodeDirectory) this;
out.println(", size=" + dir.getChildrenList(snapshot).size());
}
out.print(", permission=" + getFsPermission(snapshot));
out.print(", group=" + getGroupName(snapshot));
out.print(", user=" + getUserName(snapshot));
}
/**

View File

@ -217,7 +217,7 @@ public class INodeDirectory extends INode {
}
/** Replace a child {@link INodeFile} with an {@link INodeFileWithSnapshot}. */
INodeFileWithSnapshot replaceINodeFile(final INodeFile child) {
INodeFileWithSnapshot replaceChild4INodeFileWithSnapshot(final INodeFile child) {
assertChildrenNonNull();
Preconditions.checkArgument(!(child instanceof INodeFileWithSnapshot),
"Child file is already an INodeFileWithLink, child=" + child);
@ -831,6 +831,8 @@ public class INodeDirectory extends INode {
public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
final Snapshot snapshot) {
super.dumpTreeRecursively(out, prefix, snapshot);
out.println(", childrenSize=" + getChildrenList(snapshot).size());
if (prefix.length() >= 2) {
prefix.setLength(prefix.length() - 2);
prefix.append(" ");

View File

@ -19,6 +19,8 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.Arrays;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.FsAction;
@ -29,8 +31,10 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/** I-node for closed file. */
@ -119,7 +123,7 @@ public class INodeFile extends INode implements BlockCollection {
@Override
public Pair<? extends INodeFile, ? extends INodeFile> createSnapshotCopy() {
return parent.replaceINodeFile(this).createSnapshotCopy();
return parent.replaceChild4INodeFileWithSnapshot(this).createSnapshotCopy();
}
/** @return true unconditionally. */
@ -322,17 +326,22 @@ public class INodeFile extends INode implements BlockCollection {
return blocks == null ? 0 : blocks.length;
}
/**
* @return A String containing all the blockInfo
*/
String printBlocksInfo() {
if (blocks == null) {
return "";
@VisibleForTesting
@Override
public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
final Snapshot snapshot) {
super.dumpTreeRecursively(out, prefix, snapshot);
out.print(", fileSize=" + computeFileSize(true));
out.print(", blocks=" + (blocks == null? null: Arrays.asList(blocks)));
if (this instanceof FileWithSnapshot) {
final FileWithSnapshot withSnapshot = (FileWithSnapshot) this;
final FileWithSnapshot next = withSnapshot.getNext();
// next link pointing to itself is equivalent to no link
if (withSnapshot.getNext() != this) {
out.print(", next="
+ (next != null ? next.asINodeFile().getObjectString() : "null"));
}
StringBuilder buffer = new StringBuilder();
for (BlockInfo blk : blocks) {
buffer.append(blk.toString() + " ");
}
return buffer.toString();
out.println();
}
}
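
The new dumpTreeRecursively code above prints the next link of a FileWithSnapshot only when it points somewhere other than the node itself, because a self-link means the node is not part of a multi-element circular list. Below is a minimal, self-contained sketch of that convention; CircularLinkSketch and its methods are illustrative stand-ins, not the actual FileWithSnapshot API.

/** Illustration only: the "next == this means unlinked" convention checked above. */
class CircularLinkSketch {
  private CircularLinkSketch next = this;   // a self-link means "no link"

  CircularLinkSketch getNext() {
    return next;
  }

  /** Splice a node into the circular list right after this element. */
  void insertAfter(CircularLinkSketch node) {
    node.next = this.next;
    this.next = node;
  }

  /** Mirrors the dumpTreeRecursively check: print next only when truly linked. */
  boolean isLinked() {
    return next != this;
  }
}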

View File

@ -17,9 +17,12 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.PrintWriter;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
/**
* An {@link INode} representing a symbolic link.
@ -77,4 +80,11 @@ public class INodeSymlink extends INode {
summary[1]++; // Increment the file count
return summary;
}
@Override
public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
final Snapshot snapshot) {
super.dumpTreeRecursively(out, prefix, snapshot);
out.println();
}
}

View File

@ -32,6 +32,9 @@ import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff.Container;
import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff.UndoInfo;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import com.google.common.base.Preconditions;
@ -44,367 +47,18 @@ import com.google.common.base.Preconditions;
public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
/**
* The difference between the current state and a previous snapshot
* of an INodeDirectory.
*
* <pre>
* Two lists are maintained in the algorithm:
* - c-list for newly created inodes
* - d-list for the deleted inodes
*
* Denote the state of an inode by the following
* (0, 0): neither in c-list nor d-list
* (c, 0): in c-list but not in d-list
* (0, d): in d-list but not in c-list
* (c, d): in both c-list and d-list
*
* For each case below, ( , ) at the end shows the result state of the inode.
*
* Case 1. Suppose the inode i is NOT in the previous snapshot. (0, 0)
* 1.1. create i in current: add it to c-list (c, 0)
* 1.1.1. create i in current and then create: impossible
* 1.1.2. create i in current and then delete: remove it from c-list (0, 0)
* 1.1.3. create i in current and then modify: replace it in c-list (c', 0)
*
* 1.2. delete i from current: impossible
*
* 1.3. modify i in current: impossible
*
* Case 2. Suppose the inode i is ALREADY in the previous snapshot. (0, 0)
* 2.1. create i in current: impossible
*
* 2.2. delete i from current: add it to d-list (0, d)
* 2.2.1. delete i from current and then create: add it to c-list (c, d)
* 2.2.2. delete i from current and then delete: impossible
* 2.2.3. delete i from current and then modify: impossible
*
* 2.3. modify i in current: put it in both c-list and d-list (c, d)
* 2.3.1. modify i in current and then create: impossible
* 2.3.2. modify i in current and then delete: remove it from c-list (0, d)
* 2.3.3. modify i in current and then modify: replace it in c-list (c', d)
* </pre>
* of the children list of an INodeDirectory.
*/
public static class Diff {
/**
* Search the inode from the list.
* @return -1 if the list is null; otherwise, return the insertion point
* defined in {@link Collections#binarySearch(List, Object)}.
* Note that, when the list is null, -1 is the correct insertion point.
*/
static int search(final List<INode> inodes, final INode i) {
return search(inodes, i.getLocalNameBytes());
}
private static int search(final List<INode> inodes, final byte[] name) {
return inodes == null? -1: Collections.binarySearch(inodes, name);
}
public static class ChildrenDiff extends Diff<byte[], INode> {
private ChildrenDiff() {}
private static void remove(final List<INode> inodes, final int i,
final INode expected) {
final INode removed = inodes.remove(-i - 1);
Preconditions.checkState(removed == expected,
"removed != expected=%s, removed=%s.", expected, removed);
}
/** c-list: inode(s) created in current. */
private List<INode> created;
/** d-list: inode(s) deleted from current. */
private List<INode> deleted;
INode searchCreated(final byte[] name) {
int cIndex = search(created, name);
return cIndex < 0 ? null : created.get(cIndex);
}
INode searchDeleted(final byte[] name) {
int dIndex = search(deleted, name);
return dIndex < 0 ? null : deleted.get(dIndex);
}
/**
* Insert the inode to created.
* @param i the insertion point defined
* in {@link Collections#binarySearch(List, Object)}
*/
private void insertCreated(final INode inode, final int i) {
if (i >= 0) {
throw new AssertionError("Inode already exists: inode=" + inode
+ ", created=" + created);
}
if (created == null) {
created = new ArrayList<INode>(DEFAULT_FILES_PER_DIRECTORY);
}
created.add(-i - 1, inode);
}
/**
* Insert the inode to deleted.
* @param i the insertion point defined
* in {@link Collections#binarySearch(List, Object)}
*/
private void insertDeleted(final INode inode, final int i) {
if (i >= 0) {
throw new AssertionError("Inode already exists: inode=" + inode
+ ", deleted=" + deleted);
}
if (deleted == null) {
deleted = new ArrayList<INode>(DEFAULT_FILES_PER_DIRECTORY);
}
deleted.add(-i - 1, inode);
}
/**
* Create an inode in current state.
* @return the c-list insertion point for undo.
*/
int create(final INode inode) {
final int c = search(created, inode);
insertCreated(inode, c);
return c;
}
void undoCreate(final INode inode, final int insertionPoint) {
remove(created, insertionPoint, inode);
}
/**
* Delete an inode from current state.
* @return a triple for undo.
*/
Triple<Integer, INode, Integer> delete(final INode inode) {
final int c = search(created, inode);
INode previous = null;
Integer d = null;
if (c >= 0) {
// remove a newly created inode
previous = created.remove(c);
} else {
// not in c-list, it must be in previous
d = search(deleted, inode);
insertDeleted(inode, d);
}
return new Triple<Integer, INode, Integer>(c, previous, d);
}
void undoDelete(final INode inode,
final Triple<Integer, INode, Integer> undoInfo) {
final int c = undoInfo.left;
if (c >= 0) {
created.add(c, undoInfo.middle);
} else {
remove(deleted, undoInfo.right, inode);
}
}
/**
* Modify an inode in current state.
* @return a triple for undo.
*/
Triple<Integer, INode, Integer> modify(final INode oldinode,
final INode newinode) {
if (!oldinode.equals(newinode)) {
throw new AssertionError("The names do not match: oldinode="
+ oldinode + ", newinode=" + newinode);
}
final int c = search(created, newinode);
INode previous = null;
Integer d = null;
if (c >= 0) {
// Case 1.1.3 and 2.3.3: inode is already in c-list,
previous = created.set(c, newinode);
//TODO: fix a bug that previous != oldinode. Set it to oldinode for now
previous = oldinode;
} else {
d = search(deleted, oldinode);
if (d < 0) {
// Case 2.3: neither in c-list nor d-list
insertCreated(newinode, c);
insertDeleted(oldinode, d);
}
}
return new Triple<Integer, INode, Integer>(c, previous, d);
}
void undoModify(final INode oldinode, final INode newinode,
final Triple<Integer, INode, Integer> undoInfo) {
final int c = undoInfo.left;
if (c >= 0) {
created.set(c, undoInfo.middle);
} else {
final int d = undoInfo.right;
if (d < 0) {
remove(created, c, newinode);
remove(deleted, d, oldinode);
}
}
}
/**
* Find an inode in the previous snapshot.
* @return null if the inode cannot be determined in the previous snapshot
* since no change is recorded and it should be determined in the
* current snapshot; otherwise, return an array with size one
* containing the inode in the previous snapshot. Note that the
* inode can possibly be null which means that the inode is not
* found in the previous snapshot.
*/
INode[] accessPrevious(byte[] name) {
return accessPrevious(name, created, deleted);
}
private static INode[] accessPrevious(byte[] name,
final List<INode> clist, final List<INode> dlist) {
final int d = search(dlist, name);
if (d >= 0) {
// the inode was in previous and was once deleted in current.
return new INode[]{dlist.get(d)};
} else {
final int c = search(clist, name);
// When c >= 0, the inode in current is a newly created inode.
return c >= 0? new INode[]{null}: null;
}
}
/**
* Find an inode in the current snapshot.
* @return null if the inode cannot be determined in the current snapshot
* since no change is recorded and it should be determined in the
* previous snapshot; otherwise, return an array with size one
* containing the inode in the current snapshot. Note that the
* inode can possibly be null which means that the inode is not
* found in the current snapshot.
*/
INode[] accessCurrent(byte[] name) {
return accessPrevious(name, deleted, created);
}
/**
* Apply this diff to previous snapshot in order to obtain current state.
* @return the current state of the list.
*/
List<INode> apply2Previous(final List<INode> previous) {
return apply2Previous(previous, created, deleted);
}
private static List<INode> apply2Previous(final List<INode> previous,
final List<INode> clist, final List<INode> dlist) {
final List<INode> current = new ArrayList<INode>(previous);
if (dlist != null) {
for(INode d : dlist) {
current.remove(d);
}
}
if (clist != null) {
for(INode c : clist) {
final int i = search(current, c);
current.add(-i - 1, c);
}
}
return current;
}
/**
* Apply the reverse of this diff to current state in order
* to obtain the previous snapshot.
* @return the previous state of the list.
*/
List<INode> apply2Current(final List<INode> current) {
return apply2Previous(current, deleted, created);
}
/**
* Combine the posterior diff with this diff. This function needs to be called
* before the posterior diff is to be deleted. In general we have:
*
* <pre>
* 1. For (c, 0) in the posterior diff, check the inode in this diff:
* 1.1 (c', 0) in this diff: impossible
* 1.2 (0, d') in this diff: put in created --> (c, d')
* 1.3 (c', d') in this diff: impossible
* 1.4 (0, 0) in this diff: put in created --> (c, 0)
* This is the same logic with {@link #create(INode)}.
*
* 2. For (0, d) in the posterior diff,
* 2.1 (c', 0) in this diff: remove from old created --> (0, 0)
* 2.2 (0, d') in this diff: impossible
* 2.3 (c', d') in this diff: remove from old created --> (0, d')
* 2.4 (0, 0) in this diff: put in deleted --> (0, d)
* This is the same logic with {@link #delete(INode)}.
*
* 3. For (c, d) in the posterior diff,
* 3.1 (c', 0) in this diff: replace old created --> (c, 0)
* 3.2 (0, d') in this diff: impossible
* 3.3 (c', d') in this diff: replace old created --> (c, d')
* 3.4 (0, 0) in this diff: put in created and deleted --> (c, d)
* This is the same logic with {@link #modify(INode, INode)}.
* </pre>
*
* Note that after this function the postDiff will be deleted.
*
* @param postDiff the posterior diff to combine
* @param deletedINodeProcesser Used in case 2.1, 2.3, 3.1, and 3.3
* to process the deleted inodes.
*/
void combinePostDiff(Diff postDiff, Processor deletedINodeProcesser) {
final List<INode> postCreated = postDiff.created != null?
postDiff.created: Collections.<INode>emptyList();
final List<INode> postDeleted = postDiff.deleted != null?
postDiff.deleted: Collections.<INode>emptyList();
final Iterator<INode> createdIterator = postCreated.iterator();
final Iterator<INode> deletedIterator = postDeleted.iterator();
INode c = createdIterator.hasNext()? createdIterator.next(): null;
INode d = deletedIterator.hasNext()? deletedIterator.next(): null;
for(; c != null || d != null; ) {
final int cmp = c == null? 1
: d == null? -1
: c.compareTo(d.getLocalNameBytes());
if (cmp < 0) {
// case 1: only in c-list
create(c);
c = createdIterator.hasNext()? createdIterator.next(): null;
} else if (cmp > 0) {
// case 2: only in d-list
Triple<Integer, INode, Integer> triple = delete(d);
if (deletedINodeProcesser != null) {
deletedINodeProcesser.process(triple.middle);
}
d = deletedIterator.hasNext()? deletedIterator.next(): null;
} else {
// case 3: in both c-list and d-list
final Triple<Integer, INode, Integer> triple = modify(d, c);
if (deletedINodeProcesser != null) {
deletedINodeProcesser.process(triple.middle);
}
c = createdIterator.hasNext()? createdIterator.next(): null;
d = deletedIterator.hasNext()? deletedIterator.next(): null;
}
}
}
/** Convert the inode list to a compact string. */
static String toString(List<INode> inodes) {
if (inodes == null || inodes.isEmpty()) {
return "<empty>";
}
final StringBuilder b = new StringBuilder("[")
.append(inodes.get(0));
for(int i = 1; i < inodes.size(); i++) {
b.append(", ").append(inodes.get(i));
}
return b.append("]").toString();
}
@Override
public String toString() {
return getClass().getSimpleName()
+ "{created=" + toString(created)
+ ", deleted=" + toString(deleted) + "}";
private ChildrenDiff(final List<INode> created, final List<INode> deleted) {
super(created, deleted);
}
/** Serialize {@link #created} */
private void writeCreated(DataOutput out) throws IOException {
if (created != null) {
final List<INode> created = getCreatedList();
out.writeInt(created.size());
for (INode node : created) {
// For INode in created list, we only need to record its local name
@ -412,23 +66,21 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
out.writeShort(name.length);
out.write(name);
}
} else {
out.writeInt(0);
}
}
/** Serialize {@link #deleted} */
private void writeDeleted(DataOutput out) throws IOException {
if (deleted != null) {
final List<INode> deleted = getDeletedList();
out.writeInt(deleted.size());
for (INode node : deleted) {
if (node.isDirectory()) {
FSImageSerialization.writeINodeDirectory((INodeDirectory) node, out);
} else { // INodeFile
final List<INode> created = getCreatedList();
// we write the block information only for INodeFile node when the
// node is only stored in the deleted list or the node is not a
// snapshot copy
int createdIndex = search(created, node);
int createdIndex = search(created, node.getKey());
if (createdIndex < 0) {
FSImageSerialization.writeINodeFile((INodeFile) node, out, true);
} else {
@ -446,9 +98,6 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
}
}
}
} else {
out.writeInt(0);
}
}
/** Serialize to out */
@ -460,13 +109,11 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
/** @return The list of INodeDirectory contained in the deleted list */
private List<INodeDirectory> getDirsInDeleted() {
List<INodeDirectory> dirList = new ArrayList<INodeDirectory>();
if (deleted != null) {
for (INode node : deleted) {
for (INode node : getDeletedList()) {
if (node.isDirectory()) {
dirList.add((INodeDirectory) node);
}
}
}
return dirList;
}
}
@ -502,7 +149,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
*/
private SnapshotDiff posteriorDiff;
/** The children list diff. */
private final Diff diff;
private final ChildrenDiff diff;
/** The snapshot inode data. It is null when there is no change. */
private INodeDirectory snapshotINode = null;
@ -511,7 +158,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
this.snapshot = snapshot;
this.childrenSize = dir.getChildrenList(null).size();
this.diff = new Diff();
this.diff = new ChildrenDiff();
}
/** Constructor used by FSImage loading */
@ -523,12 +170,10 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
this.childrenSize = childrenSize;
this.snapshotINode = snapshotINode;
this.posteriorDiff = posteriorDiff;
this.diff = new Diff();
diff.created = createdList;
diff.deleted = deletedList;
this.diff = new ChildrenDiff(createdList, deletedList);
}
Diff getDiff() {
ChildrenDiff getDiff() {
return diff;
}
@ -581,9 +226,9 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
private List<INode> initChildren() {
if (children == null) {
final Diff combined = new Diff();
final ChildrenDiff combined = new ChildrenDiff();
for(SnapshotDiff d = SnapshotDiff.this; d != null; d = d.posteriorDiff) {
combined.combinePostDiff(d.diff, null);
combined.combinePosterior(d.diff, null);
}
children = combined.apply2Current(ReadOnlyList.Util.asList(
INodeDirectoryWithSnapshot.this.getChildrenList(null)));
@ -616,10 +261,10 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
/** @return the child with the given name. */
INode getChild(byte[] name, boolean checkPosterior) {
for(SnapshotDiff d = this; ; d = d.posteriorDiff) {
final INode[] array = d.diff.accessPrevious(name);
if (array != null) {
// the diff is able to find it
return array[0];
final Container<INode> returned = d.diff.accessPrevious(name);
if (returned != null) {
// the diff is able to determine the inode
return returned.getElement();
} else if (!checkPosterior) {
// Since checkPosterior is false, return null, i.e. not found.
return null;
@ -668,12 +313,6 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
}
}
/** An interface for passing a method to process inodes. */
static interface Processor {
/** Process the given inode. */
void process(INode inode);
}
/** Create an {@link INodeDirectoryWithSnapshot} with the given snapshot.*/
public static INodeDirectoryWithSnapshot newInstance(INodeDirectory dir,
Snapshot latest) {
@ -718,7 +357,8 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
if (snapshotIndex > 0) {
// combine the to-be-removed diff with its previous diff
SnapshotDiff previousDiff = diffs.get(snapshotIndex - 1);
previousDiff.diff.combinePostDiff(diffToRemove.diff, new Processor() {
previousDiff.diff.combinePosterior(diffToRemove.diff,
new Diff.Processor<INode>() {
/** Collect blocks for deleted files. */
@Override
public void process(INode inode) {
@ -779,10 +419,10 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
}
/**
* Check if the latest {@link Diff} exists. If not, add it.
* @return the latest {@link Diff}, which is never null.
* Check if the latest {@link ChildrenDiff} exists. If not, add it.
* @return the latest {@link ChildrenDiff}, which is never null.
*/
Diff checkAndAddLatestDiff(Snapshot latest) {
ChildrenDiff checkAndAddLatestDiff(Snapshot latest) {
return checkAndAddLatestSnapshotDiff(latest).diff;
}
@ -845,8 +485,8 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
final Pair<? extends INode, ? extends INode> p = child.createSnapshotCopy();
if (p.left != p.right) {
final Triple<Integer, INode, Integer> triple = diff.diff.modify(p.right, p.left);
if (triple.middle != null && p.left instanceof FileWithSnapshot) {
final UndoInfo<INode> undoIndo = diff.diff.modify(p.right, p.left);
if (undoIndo.getTrashedElement() != null && p.left instanceof FileWithSnapshot) {
// also should remove oldinode from the circular list
FileWithSnapshot newNodeWithLink = (FileWithSnapshot) p.left;
FileWithSnapshot oldNodeWithLink = (FileWithSnapshot) p.right;
@ -859,7 +499,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
@Override
public boolean addChild(INode inode, boolean setModTime, Snapshot latest) {
Diff diff = null;
ChildrenDiff diff = null;
Integer undoInfo = null;
if (latest != null) {
diff = checkAndAddLatestDiff(latest);
@ -874,8 +514,8 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
@Override
public INode removeChild(INode child, Snapshot latest) {
Diff diff = null;
Triple<Integer, INode, Integer> undoInfo = null;
ChildrenDiff diff = null;
UndoInfo<INode> undoInfo = null;
if (latest != null) {
diff = checkAndAddLatestDiff(latest);
undoInfo = diff.delete(child);
@ -887,9 +527,9 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
diff.undoDelete(child, undoInfo);
} else {
//clean up the previously created file, if there is any.
final INode previous = undoInfo.middle;
if (previous != null && previous instanceof FileWithSnapshot) {
((FileWithSnapshot)previous).removeSelf();
final INode trashed = undoInfo.getTrashedElement();
if (trashed != null && trashed instanceof FileWithSnapshot) {
((FileWithSnapshot)trashed).removeSelf();
}
}
}
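
The search/insert/remove helpers removed from this file (and the corresponding methods of the new generic Diff class later in the commit) all rely on the Collections.binarySearch insertion-point convention: for a missing key the search returns -(insertionPoint) - 1, so -i - 1 recovers the position at which the element should be inserted, and the undo paths remove from that same position. A small standalone illustration, independent of the HDFS classes:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class InsertionPointSketch {
  public static void main(String[] args) {
    final List<String> names = new ArrayList<String>(Arrays.asList("a", "c", "e"));

    final int i = Collections.binarySearch(names, "d");
    // "d" is absent, so binarySearch returns -(insertionPoint) - 1 == -3.
    System.out.println(i);            // -3
    names.add(-i - 1, "d");           // insert at position 2, keeping the list sorted
    System.out.println(names);        // [a, c, d, e]

    // The undo path removes from the same position and can verify identity,
    // in the same spirit as the Preconditions.checkState in remove(...) above.
    final String removed = names.remove(-i - 1);
    System.out.println(removed);      // d
  }
}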

View File

@ -32,25 +32,22 @@ public class INodeFileUnderConstructionWithSnapshot
extends INodeFileUnderConstruction implements FileWithSnapshot {
private FileWithSnapshot next;
INodeFileUnderConstructionWithSnapshot(final FileWithSnapshot f,
INodeFileUnderConstructionWithSnapshot(final INodeFile f,
final String clientName,
final String clientMachine,
final DatanodeDescriptor clientNode) {
super(f.asINodeFile(), clientName, clientMachine, clientNode);
super(f, clientName, clientMachine, clientNode);
next = this;
}
/**
* The constructor that creates an
* {@link INodeFileUnderConstructionWithSnapshot} based on an
* {@link INodeFileUnderConstruction}
* Construct an {@link INodeFileUnderConstructionWithSnapshot} based on an
* {@link INodeFileUnderConstruction}.
*
* @param child The given {@link INodeFileUnderConstruction} instance
* @param f The given {@link INodeFileUnderConstruction} instance
*/
public INodeFileUnderConstructionWithSnapshot(
INodeFileUnderConstruction child) {
super(child, child.getClientName(), child.getClientMachine(), child
.getClientNode());
next = this;
public INodeFileUnderConstructionWithSnapshot(INodeFileUnderConstruction f) {
this(f, f.getClientName(), f.getClientMachine(), f.getClientNode());
}
@Override

View File

@ -32,7 +32,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.Diff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.ChildrenDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.SnapshotDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.Root;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
@ -108,12 +108,12 @@ public class SnapshotFSImageFormat {
}
/**
* Search the given {@link Diff} to find an inode matching the specific name.
* Search the given {@link ChildrenDiff} to find an inode matching the specific name.
* @param createdNodeName The name of the node for searching.
* @param diff The given {@link Diff} where to search the node.
* @param diff The given {@link ChildrenDiff} where to search the node.
* @return The matched inode. Return null if no matched inode can be found.
*/
private static INode findCreated(byte[] createdNodeName, Diff diff) {
private static INode findCreated(byte[] createdNodeName, ChildrenDiff diff) {
INode c = diff.searchCreated(createdNodeName);
INode d = diff.searchDeleted(createdNodeName);
if (c == null && d != null) {

View File

@ -0,0 +1,467 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot.diff;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import com.google.common.base.Preconditions;
/**
* The difference between the current state and a previous state of a list.
*
* Given a previous state of a set and a sequence of create, delete and modify
* operations such that the current state of the set can be obtained by applying
* the operations on the previous state, the following algorithm constructs the
* difference between the current state and the previous state of the set.
*
* <pre>
* Two lists are maintained in the algorithm:
* - c-list for newly created elements
* - d-list for the deleted elements
*
* Denote the state of an element by the following
* (0, 0): neither in c-list nor d-list
* (c, 0): in c-list but not in d-list
* (0, d): in d-list but not in c-list
* (c, d): in both c-list and d-list
*
* For each case below, ( , ) at the end shows the result state of the element.
*
* Case 1. Suppose the element i is NOT in the previous state. (0, 0)
* 1.1. create i in current: add it to c-list (c, 0)
* 1.1.1. create i in current and then create: impossible
* 1.1.2. create i in current and then delete: remove it from c-list (0, 0)
* 1.1.3. create i in current and then modify: replace it in c-list (c', 0)
*
* 1.2. delete i from current: impossible
*
* 1.3. modify i in current: impossible
*
* Case 2. Suppose the element i is ALREADY in the previous state. (0, 0)
* 2.1. create i in current: impossible
*
* 2.2. delete i from current: add it to d-list (0, d)
* 2.2.1. delete i from current and then create: add it to c-list (c, d)
* 2.2.2. delete i from current and then delete: impossible
* 2.2.3. delete i from current and then modify: impossible
*
* 2.3. modify i in current: put it in both c-list and d-list (c, d)
* 2.3.1. modify i in current and then create: impossible
* 2.3.2. modify i in current and then delete: remove it from c-list (0, d)
* 2.3.3. modify i in current and then modify: replace it in c-list (c', d)
* </pre>
*
* @param <K> The key type.
* @param <E> The element type, which must implement {@link Element} interface.
*/
public class Diff<K, E extends Diff.Element<K>> {
/** An interface for the elements in a {@link Diff}. */
public static interface Element<K> extends Comparable<K> {
/** @return the key of this object. */
public K getKey();
}
/** An interface for passing a method in order to process elements. */
public static interface Processor<E> {
/** Process the given element. */
public void process(E element);
}
/** Containing exactly one element. */
public static class Container<E> {
private final E element;
private Container(E element) {
this.element = element;
}
/** @return the element. */
public E getElement() {
return element;
}
}
/**
* Undo information for some operations such as {@link Diff#delete(E)}
* and {@link Diff#modify(Element, Element)}.
*/
public static class UndoInfo<E> {
private final int createdInsertionPoint;
private final E trashed;
private final Integer deletedInsertionPoint;
private UndoInfo(final int createdInsertionPoint, final E trashed,
final Integer deletedInsertionPoint) {
this.createdInsertionPoint = createdInsertionPoint;
this.trashed = trashed;
this.deletedInsertionPoint = deletedInsertionPoint;
}
public E getTrashedElement() {
return trashed;
}
}
private static final int DEFAULT_ARRAY_INITIAL_CAPACITY = 4;
/**
* Search the element from the list.
* @return -1 if the list is null; otherwise, return the insertion point
* defined in {@link Collections#binarySearch(List, Object)}.
* Note that, when the list is null, -1 is the correct insertion point.
*/
protected static <K, E extends Comparable<K>> int search(
final List<E> elements, final K name) {
return elements == null? -1: Collections.binarySearch(elements, name);
}
private static <E> void remove(final List<E> elements, final int i,
final E expected) {
final E removed = elements.remove(-i - 1);
Preconditions.checkState(removed == expected,
"removed != expected=%s, removed=%s.", expected, removed);
}
/** c-list: element(s) created in current. */
private List<E> created;
/** d-list: element(s) deleted from current. */
private List<E> deleted;
protected Diff() {}
protected Diff(final List<E> created, final List<E> deleted) {
this.created = created;
this.deleted = deleted;
}
/** @return the created list, which is never null. */
protected List<E> getCreatedList() {
return created == null? Collections.<E>emptyList(): created;
}
/** @return the deleted list, which is never null. */
protected List<E> getDeletedList() {
return deleted == null? Collections.<E>emptyList(): deleted;
}
/**
* @return null if the element is not found;
* otherwise, return the element in the c-list.
*/
public E searchCreated(final K name) {
final int c = search(created, name);
return c < 0 ? null : created.get(c);
}
/**
* @return null if the element is not found;
* otherwise, return the element in the d-list.
*/
public E searchDeleted(final K name) {
final int d = search(deleted, name);
return d < 0 ? null : deleted.get(d);
}
/**
* Insert the element to created.
* @param i the insertion point defined
* in {@link Collections#binarySearch(List, Object)}
*/
private void insertCreated(final E element, final int i) {
if (i >= 0) {
throw new AssertionError("Element already exists: element=" + element
+ ", created=" + created);
}
if (created == null) {
created = new ArrayList<E>(DEFAULT_ARRAY_INITIAL_CAPACITY);
}
created.add(-i - 1, element);
}
/**
* Insert the element to deleted.
* @param i the insertion point defined
* in {@link Collections#binarySearch(List, Object)}
*/
private void insertDeleted(final E element, final int i) {
if (i >= 0) {
throw new AssertionError("Element already exists: element=" + element
+ ", deleted=" + deleted);
}
if (deleted == null) {
deleted = new ArrayList<E>(DEFAULT_ARRAY_INITIAL_CAPACITY);
}
deleted.add(-i - 1, element);
}
/**
* Create an element in current state.
* @return the c-list insertion point for undo.
*/
public int create(final E element) {
final int c = search(created, element.getKey());
insertCreated(element, c);
return c;
}
/**
* Undo the previous {@link #create(E)} operation. Note that the behavior is
* undefined if the previous operation is not {@link #create(E)}.
*/
public void undoCreate(final E element, final int insertionPoint) {
remove(created, insertionPoint, element);
}
/**
* Delete an element from current state.
* @return the undo information.
*/
public UndoInfo<E> delete(final E element) {
final int c = search(created, element.getKey());
E previous = null;
Integer d = null;
if (c >= 0) {
// remove a newly created element
previous = created.remove(c);
} else {
// not in c-list, it must be in previous
d = search(deleted, element.getKey());
insertDeleted(element, d);
}
return new UndoInfo<E>(c, previous, d);
}
/**
* Undo the previous {@link #delete(E)} operation. Note that the behavior is
* undefined if the previous operation is not {@link #delete(E)}.
*/
public void undoDelete(final E element, final UndoInfo<E> undoInfo) {
final int c = undoInfo.createdInsertionPoint;
if (c >= 0) {
created.add(c, undoInfo.trashed);
} else {
remove(deleted, undoInfo.deletedInsertionPoint, element);
}
}
/**
* Modify an element in current state.
* @return the undo information.
*/
public UndoInfo<E> modify(final E oldElement, final E newElement) {
Preconditions.checkArgument(oldElement != newElement,
"They are the same object: oldElement == newElement = %s", newElement);
Preconditions.checkArgument(oldElement.equals(newElement),
"The names do not match: oldElement=%s, newElement=%s",
oldElement, newElement);
final int c = search(created, newElement.getKey());
E previous = null;
Integer d = null;
if (c >= 0) {
// Case 1.1.3 and 2.3.3: element is already in c-list,
previous = created.set(c, newElement);
//TODO: fix a bug that previous != oldElement. Set it to oldElement for now
previous = oldElement;
} else {
d = search(deleted, oldElement.getKey());
if (d < 0) {
// Case 2.3: neither in c-list nor d-list
insertCreated(newElement, c);
insertDeleted(oldElement, d);
}
}
return new UndoInfo<E>(c, previous, d);
}
/**
* Undo the previous {@link #modify(E, E)} operation. Note that the behavior
* is undefined if the previous operation is not {@link #modify(E, E)}.
*/
public void undoModify(final E oldElement, final E newElement,
final UndoInfo<E> undoInfo) {
final int c = undoInfo.createdInsertionPoint;
if (c >= 0) {
created.set(c, undoInfo.trashed);
} else {
final int d = undoInfo.deletedInsertionPoint;
if (d < 0) {
remove(created, c, newElement);
remove(deleted, d, oldElement);
}
}
}
/**
* Find an element in the previous state.
*
* @return null if the element cannot be determined in the previous state
* since no change is recorded and it should be determined in the
* current state; otherwise, return a {@link Container} containing the
* element in the previous state. Note that the element can possibly
* be null which means that the element is not found in the previous
* state.
*/
public Container<E> accessPrevious(final K name) {
return accessPrevious(name, created, deleted);
}
private static <K, E extends Diff.Element<K>> Container<E> accessPrevious(
final K name, final List<E> clist, final List<E> dlist) {
final int d = search(dlist, name);
if (d >= 0) {
// the element was in previous and was once deleted in current.
return new Container<E>(dlist.get(d));
} else {
final int c = search(clist, name);
// When c >= 0, the element in current is a newly created element.
return c < 0? null: new Container<E>(null);
}
}
/**
* Find an element in the current state.
*
* @return null if the element cannot be determined in the current state since
* no change is recorded and it should be determined in the previous
* state; otherwise, return a {@link Container} containing the element in
* the current state. Note that the element can possibly be null which
* means that the element is not found in the current state.
*/
public Container<E> accessCurrent(K name) {
return accessPrevious(name, deleted, created);
}
/**
* Apply this diff to previous state in order to obtain current state.
* @return the current state of the list.
*/
public List<E> apply2Previous(final List<E> previous) {
return apply2Previous(previous, getCreatedList(), getDeletedList());
}
private static <K, E extends Diff.Element<K>> List<E> apply2Previous(
final List<E> previous, final List<E> clist, final List<E> dlist) {
final List<E> current = new ArrayList<E>(previous);
for(E d : dlist) {
current.remove(d);
}
for(E c : clist) {
final int i = search(current, c.getKey());
current.add(-i - 1, c);
}
return current;
}
/**
* Apply the reverse of this diff to current state in order
* to obtain the previous state.
* @return the previous state of the list.
*/
public List<E> apply2Current(final List<E> current) {
return apply2Previous(current, getDeletedList(), getCreatedList());
}
/**
* Combine this diff with a posterior diff. We have the following cases:
*
* <pre>
* 1. For (c, 0) in the posterior diff, check the element in this diff:
* 1.1 (c', 0) in this diff: impossible
* 1.2 (0, d') in this diff: put in c-list --> (c, d')
* 1.3 (c', d') in this diff: impossible
* 1.4 (0, 0) in this diff: put in c-list --> (c, 0)
* This is the same logic as {@link #create(E)}.
*
* 2. For (0, d) in the posterior diff,
* 2.1 (c', 0) in this diff: remove from c-list --> (0, 0)
* 2.2 (0, d') in this diff: impossible
* 2.3 (c', d') in this diff: remove from c-list --> (0, d')
* 2.4 (0, 0) in this diff: put in d-list --> (0, d)
* This is the same logic as {@link #delete(E)}.
*
* 3. For (c, d) in the posterior diff,
* 3.1 (c', 0) in this diff: replace the element in c-list --> (c, 0)
* 3.2 (0, d') in this diff: impossible
* 3.3 (c', d') in this diff: replace the element in c-list --> (c, d')
* 3.4 (0, 0) in this diff: put in c-list and d-list --> (c, d)
* This is the same logic as {@link #modify(E, E)}.
* </pre>
*
* @param posterior the posterior diff to combine with.
* @param deletedProcesser
* process the deleted/overwritten elements in case 2.1, 2.3, 3.1 and 3.3.
*/
public void combinePosterior(final Diff<K, E> posterior,
final Processor<E> deletedProcesser) {
final Iterator<E> createdIterator = posterior.getCreatedList().iterator();
final Iterator<E> deletedIterator = posterior.getDeletedList().iterator();
E c = createdIterator.hasNext()? createdIterator.next(): null;
E d = deletedIterator.hasNext()? deletedIterator.next(): null;
for(; c != null || d != null; ) {
final int cmp = c == null? 1
: d == null? -1
: c.compareTo(d.getKey());
if (cmp < 0) {
// case 1: only in c-list
create(c);
c = createdIterator.hasNext()? createdIterator.next(): null;
} else if (cmp > 0) {
// case 2: only in d-list
final UndoInfo<E> ui = delete(d);
if (deletedProcesser != null) {
deletedProcesser.process(ui.trashed);
}
d = deletedIterator.hasNext()? deletedIterator.next(): null;
} else {
// case 3: in both c-list and d-list
final UndoInfo<E> ui = modify(d, c);
if (deletedProcesser != null) {
deletedProcesser.process(ui.trashed);
}
c = createdIterator.hasNext()? createdIterator.next(): null;
d = deletedIterator.hasNext()? deletedIterator.next(): null;
}
}
}
/** Convert the element list to a compact string. */
static <E> String toString(List<E> elements) {
if (elements == null || elements.isEmpty()) {
return "<empty>";
}
final StringBuilder b = new StringBuilder("[")
.append(elements.get(0));
for(int i = 1; i < elements.size(); i++) {
b.append(", ").append(elements.get(i));
}
return b.append("]").toString();
}
@Override
public String toString() {
return getClass().getSimpleName()
+ "{created=" + toString(created)
+ ", deleted=" + toString(deleted) + "}";
}
}
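
To make the contract of the new generic Diff class concrete, here is a small usage sketch; it is not part of the commit. It sits in the new diff package only because the Diff constructors are protected, and DiffUsageSketch and its Item element type are hypothetical names. It walks through create/delete with undo, the apply2Previous/apply2Current round trip, accessPrevious, and combinePosterior.

package org.apache.hadoop.hdfs.server.namenode.snapshot.diff;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/** Usage sketch for the generic Diff class (illustration only). */
public class DiffUsageSketch {
  /** A minimal element keyed by its own name. */
  static class Item implements Diff.Element<String> {
    final String name;
    Item(String name) { this.name = name; }
    @Override public String getKey() { return name; }
    @Override public int compareTo(String key) { return name.compareTo(key); }
    @Override public String toString() { return name; }
  }

  public static void main(String[] args) {
    final Item a = new Item("a");
    final Item b = new Item("b");
    final Item c = new Item("c");
    final List<Item> previous = new ArrayList<Item>(Arrays.asList(a, b));

    // Record changes made on top of the previous state.
    final Diff<String, Item> diff = new Diff<String, Item>();
    diff.create(c);                                    // c: (c, 0)
    final Diff.UndoInfo<Item> undo = diff.delete(b);   // b: (0, d)
    diff.undoDelete(b, undo);                          // roll the deletion back
    diff.delete(b);                                    // and delete b again

    // previous + diff == current, and current - diff == previous.
    final List<Item> current = diff.apply2Previous(previous);  // [a, c]
    final List<Item> back = diff.apply2Current(current);       // [a, b]
    System.out.println(current + " " + back);

    // accessPrevious: null means "no information recorded for this key";
    // a non-null Container may still hold null if the key was created in current.
    System.out.println(diff.accessPrevious("a"));               // null: nothing recorded for a
    System.out.println(diff.accessPrevious("b").getElement());  // b: it was deleted from current
    System.out.println(diff.accessPrevious("c").getElement());  // null: c did not exist before

    // Fold a later diff into this one.
    final Diff<String, Item> posterior = new Diff<String, Item>();
    posterior.create(new Item("d"));
    diff.combinePosterior(posterior, null);
    System.out.println(diff);   // Diff{created=[c, d], deleted=[b]}
  }
}

The record-then-undo calls in the sketch mirror how the addChild and removeChild overrides in INodeDirectoryWithSnapshot use ChildrenDiff: the change is recorded in the latest diff first, the real children list is mutated, and undoCreate/undoDelete restore the diff if the mutation fails.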

View File

@ -38,7 +38,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.Diff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.ChildrenDiff;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@ -108,7 +108,7 @@ public class TestINodeFileUnderConstructionWithSnapshot {
INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
INodeDirectorySnapshottable dirNode = (INodeDirectorySnapshottable) fsdir
.getINode(dir.toString());
Diff diff = dirNode.getLastSnapshotDiff().getDiff();
ChildrenDiff diff = dirNode.getLastSnapshotDiff().getDiff();
INode nodeInCreated = diff.searchCreated(fileNode.getLocalNameBytes());
assertTrue(fileNode == nodeInCreated);
INode nodeInDeleted = diff.searchDeleted(fileNode.getLocalNameBytes());
@ -187,7 +187,7 @@ public class TestINodeFileUnderConstructionWithSnapshot {
assertEquals(BLOCKSIZE * 2, ((INodeFile) fileNode).computeFileSize(true));
INodeDirectorySnapshottable dirNode = (INodeDirectorySnapshottable) fsdir
.getINode(dir.toString());
Diff diff = dirNode.getLastSnapshotDiff().getDiff();
ChildrenDiff diff = dirNode.getLastSnapshotDiff().getDiff();
INode nodeInDeleted_S0 = diff.searchDeleted(fileNode.getLocalNameBytes());
assertTrue(nodeInDeleted_S0 instanceof INodeFileUnderConstructionSnapshot);
assertEquals(BLOCKSIZE * 2,

View File

@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
@ -35,6 +36,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.ipc.ProtobufRpcEngine.Server;
@ -179,4 +181,37 @@ public class TestNestedSnapshots {
}
}
}
/**
* Test {@link Snapshot#ID_COMPARATOR}.
*/
@Test
public void testIdCmp() {
final PermissionStatus perm = PermissionStatus.createImmutable(
"user", "group", FsPermission.createImmutable((short)0));
final INodeDirectory dir = new INodeDirectory(0, "foo", perm);
final INodeDirectorySnapshottable snapshottable
= new INodeDirectorySnapshottable(dir);
final Snapshot[] snapshots = {
new Snapshot(1, "s1", snapshottable),
new Snapshot(1, "s1", snapshottable),
new Snapshot(2, "s2", snapshottable),
new Snapshot(2, "s2", snapshottable),
};
Assert.assertEquals(0, Snapshot.ID_COMPARATOR.compare(null, null));
for(Snapshot s : snapshots) {
Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(null, s) < 0);
Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(s, null) > 0);
for(Snapshot t : snapshots) {
final int expected = s.getRoot().getLocalName().compareTo(
t.getRoot().getLocalName());
final int computed = Snapshot.ID_COMPARATOR.compare(s, t);
Assert.assertEquals(expected > 0, computed > 0);
Assert.assertEquals(expected == 0, computed == 0);
Assert.assertEquals(expected < 0, computed < 0);
}
}
}
}

View File

@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
package org.apache.hadoop.hdfs.server.namenode.snapshot.diff;
import java.util.ArrayList;
import java.util.List;
@ -24,16 +24,16 @@ import java.util.Random;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INode.Triple;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.Diff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff.Container;
import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff.UndoInfo;
import org.junit.Assert;
import org.junit.Test;
/**
* Test {@link INodeDirectoryWithSnapshot}, especially, {@link Diff}.
* Test {@link Diff} with {@link INode}.
*/
public class TestINodeDirectoryWithSnapshot {
public class TestDiff {
private static final Random RANDOM = new Random();
private static final int UNDO_TEST_P = 10;
private static final PermissionStatus PERM = PermissionStatus.createImmutable(
@ -84,13 +84,13 @@ public class TestINodeDirectoryWithSnapshot {
// make modifications to current and record the diff
final List<INode> current = new ArrayList<INode>(previous);
final Diff[] diffs = new Diff[5];
for(int j = 0; j < diffs.length; j++) {
diffs[j] = new Diff();
final List<Diff<byte[], INode>> diffs = new ArrayList<Diff<byte[], INode>>();
for(int j = 0; j < 5; j++) {
diffs.add(new Diff<byte[], INode>());
}
for(int m = 0; m < numModifications; m++) {
final int j = m * diffs.length / numModifications;
final int j = m * diffs.size() / numModifications;
// if current is empty, the next operation must be create;
// otherwise, randomly pick an operation.
@ -99,19 +99,19 @@ public class TestINodeDirectoryWithSnapshot {
case 1: // create
{
final INode i = newINode(n++, width);
create(i, current, diffs[j]);
create(i, current, diffs.get(j));
break;
}
case 2: // delete
{
final INode i = current.get(RANDOM.nextInt(current.size()));
delete(i, current, diffs[j]);
delete(i, current, diffs.get(j));
break;
}
case 3: // modify
{
final INode i = current.get(RANDOM.nextInt(current.size()));
modify(i, current, diffs[j]);
modify(i, current, diffs.get(j));
break;
}
}
@ -120,8 +120,8 @@ public class TestINodeDirectoryWithSnapshot {
{
// check if current == previous + diffs
List<INode> c = previous;
for(int i = 0; i < diffs.length; i++) {
c = diffs[i].apply2Previous(c);
for(int i = 0; i < diffs.size(); i++) {
c = diffs.get(i).apply2Previous(c);
}
if (!hasIdenticalElements(current, c)) {
System.out.println("previous = " + Diff.toString(previous));
@ -133,8 +133,8 @@ public class TestINodeDirectoryWithSnapshot {
// check if previous == current - diffs
List<INode> p = current;
for(int i = diffs.length - 1; i >= 0; i--) {
p = diffs[i].apply2Current(p);
for(int i = diffs.size() - 1; i >= 0; i--) {
p = diffs.get(i).apply2Current(p);
}
if (!hasIdenticalElements(previous, p)) {
System.out.println("previous = " + Diff.toString(previous));
@ -146,9 +146,9 @@ public class TestINodeDirectoryWithSnapshot {
}
// combine all diffs
final Diff combined = diffs[0];
for(int i = 1; i < diffs.length; i++) {
combined.combinePostDiff(diffs[i], null);
final Diff<byte[], INode> combined = diffs.get(0);
for(int i = 1; i < diffs.size(); i++) {
combined.combinePosterior(diffs.get(i), null);
}
{
@ -177,32 +177,32 @@ public class TestINodeDirectoryWithSnapshot {
for(int m = 0; m < n; m++) {
final INode inode = newINode(m, width);
{// test accessPrevious
final INode[] array = combined.accessPrevious(inode.getLocalNameBytes());
final Container<INode> r = combined.accessPrevious(inode.getKey());
final INode computed;
if (array != null) {
computed = array[0];
if (r != null) {
computed = r.getElement();
} else {
final int i = Diff.search(current, inode);
final int i = Diff.search(current, inode.getKey());
computed = i < 0? null: current.get(i);
}
final int j = Diff.search(previous, inode);
final int j = Diff.search(previous, inode.getKey());
final INode expected = j < 0? null: previous.get(j);
// must be the same object (equals is not enough)
Assert.assertTrue(computed == expected);
}
{// test accessCurrent
final INode[] array = combined.accessCurrent(inode.getLocalNameBytes());
final Container<INode> r = combined.accessCurrent(inode.getKey());
final INode computed;
if (array != null) {
computed = array[0];
if (r != null) {
computed = r.getElement();
} else {
final int i = Diff.search(previous, inode);
final int i = Diff.search(previous, inode.getKey());
computed = i < 0? null: previous.get(i);
}
final int j = Diff.search(current, inode);
final int j = Diff.search(current, inode.getKey());
final INode expected = j < 0? null: current.get(j);
// must be the same object (equals is not enough)
Assert.assertTrue(computed == expected);
@ -228,7 +228,7 @@ public class TestINodeDirectoryWithSnapshot {
return true;
}
static String toString(Diff diff) {
static String toString(Diff<byte[], INode> diff) {
return diff.toString();
}
@ -247,8 +247,8 @@ public class TestINodeDirectoryWithSnapshot {
return new INodeDirectory(n, String.format("n%0" + width + "d", n), PERM);
}
static void create(INode inode, final List<INode> current, Diff diff) {
final int i = Diff.search(current, inode);
static void create(INode inode, final List<INode> current, Diff<byte[], INode> diff) {
final int i = Diff.search(current, inode.getKey());
Assert.assertTrue(i < 0);
current.add(-i - 1, inode);
if (diff != null) {
@ -273,8 +273,8 @@ public class TestINodeDirectoryWithSnapshot {
}
}
static void delete(INode inode, final List<INode> current, Diff diff) {
final int i = Diff.search(current, inode);
static void delete(INode inode, final List<INode> current, Diff<byte[], INode> diff) {
final int i = Diff.search(current, inode.getKey());
current.remove(i);
if (diff != null) {
//test undo with 1/UNDO_TEST_P probability
@ -284,7 +284,7 @@ public class TestINodeDirectoryWithSnapshot {
before = toString(diff);
}
final Triple<Integer, INode, Integer> undoInfo = diff.delete(inode);
final UndoInfo<INode> undoInfo = diff.delete(inode);
if (testUndo) {
final String after = toString(diff);
@ -298,8 +298,8 @@ public class TestINodeDirectoryWithSnapshot {
}
}
static void modify(INode inode, final List<INode> current, Diff diff) {
final int i = Diff.search(current, inode);
static void modify(INode inode, final List<INode> current, Diff<byte[], INode> diff) {
final int i = Diff.search(current, inode.getKey());
Assert.assertTrue(i >= 0);
final INodeDirectory oldinode = (INodeDirectory)current.get(i);
final INodeDirectory newinode = new INodeDirectory(oldinode, false);
@ -314,7 +314,7 @@ public class TestINodeDirectoryWithSnapshot {
before = toString(diff);
}
final Triple<Integer, INode, Integer> undoInfo = diff.modify(oldinode, newinode);
final UndoInfo<INode> undoInfo = diff.modify(oldinode, newinode);
if (testUndo) {
final String after = toString(diff);
@ -328,38 +328,7 @@ public class TestINodeDirectoryWithSnapshot {
}
}
static void assertDiff(String s, Diff diff) {
static void assertDiff(String s, Diff<byte[], INode> diff) {
Assert.assertEquals(s, toString(diff));
}
/**
* Test {@link Snapshot#ID_COMPARATOR}.
*/
@Test
public void testIdCmp() {
final INodeDirectory dir = new INodeDirectory(0, "foo", PERM);
final INodeDirectorySnapshottable snapshottable
= new INodeDirectorySnapshottable(dir);
final Snapshot[] snapshots = {
new Snapshot(1, "s1", snapshottable),
new Snapshot(1, "s1", snapshottable),
new Snapshot(2, "s2", snapshottable),
new Snapshot(2, "s2", snapshottable),
};
Assert.assertEquals(0, Snapshot.ID_COMPARATOR.compare(null, null));
for(Snapshot s : snapshots) {
Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(null, s) < 0);
Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(s, null) > 0);
for(Snapshot t : snapshots) {
final int expected = s.getRoot().getLocalName().compareTo(
t.getRoot().getLocalName());
final int computed = Snapshot.ID_COMPARATOR.compare(s, t);
Assert.assertEquals(expected > 0, computed > 0);
Assert.assertEquals(expected == 0, computed == 0);
Assert.assertEquals(expected < 0, computed < 0);
}
}
}
}