diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index fd416f72a3a..0e8c3a4a993 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.server.namenode.visitor.NamespaceVisitor;
 import org.apache.hadoop.hdfs.util.Diff;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.ChunkedArrayList;
@@ -76,7 +77,7 @@ public abstract class INode implements INodeAttributes, Diff.Element {
   }
 
   /** Get the {@link PermissionStatus} */
-  abstract PermissionStatus getPermissionStatus(int snapshotId);
+  public abstract PermissionStatus getPermissionStatus(int snapshotId);
 
   /** The same as getPermissionStatus(null). */
   final PermissionStatus getPermissionStatus() {
@@ -1123,6 +1124,14 @@ public abstract class INode implements INodeAttributes, Diff.Element {
     }
   }
 
+  /** Accept a visitor to visit this {@link INode}. */
+  public void accept(NamespaceVisitor visitor, int snapshot) {
+    final Class clazz = visitor != null? visitor.getClass()
+        : NamespaceVisitor.class;
+    throw new UnsupportedOperationException(getClass().getSimpleName()
+        + " does not support " + clazz.getSimpleName());
+  }
+
   /**
    * INode feature such as {@link FileUnderConstructionFeature}
    * and {@link DirectoryWithQuotaFeature}.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 5f44da8792b..3836d79c6a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
+import org.apache.hadoop.hdfs.server.namenode.visitor.NamespaceVisitor;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList;
@@ -996,6 +997,11 @@ public class INodeDirectory extends INodeWithAdditionalFields
     }
   }
 
+  @Override
+  public void accept(NamespaceVisitor visitor, int snapshot) {
+    visitor.visitDirectoryRecursively(this, snapshot);
+  }
+
   public final int getChildrenNum(final int snapshotId) {
     return getChildrenList(snapshotId).size();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 3f0208c4dfb..eb4042957f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DiffList;
+import org.apache.hadoop.hdfs.server.namenode.visitor.NamespaceVisitor;
 import org.apache.hadoop.hdfs.util.LongBitFormat;
 import org.apache.hadoop.util.StringUtils;
 import static org.apache.hadoop.io.erasurecode.ErasureCodeConstants.REPLICATION_POLICY_ID;
@@ -1111,6 +1112,11 @@ public class INodeFile extends INodeWithAdditionalFields
     out.println();
   }
 
+  @Override
+  public void accept(NamespaceVisitor visitor, int snapshot) {
+    visitor.visitFile(this, snapshot);
+  }
+
   /**
    * Remove full blocks at the end file up to newLength
    * @return sum of sizes of the remained blocks
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index 69a92706ab5..a9941677134 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeat
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdfs.server.namenode.visitor.NamespaceVisitor;
 import org.apache.hadoop.security.AccessControlException;
 
 /**
@@ -368,7 +369,12 @@ public abstract class INodeReference extends INode {
     b.append("->");
     getReferredINode().dumpTreeRecursively(out, b, snapshot);
   }
-
+
+  @Override
+  public void accept(NamespaceVisitor visitor, int snapshot) {
+    visitor.visitReferenceRecursively(this, snapshot);
+  }
+
   public int getDstSnapshotId() {
     return Snapshot.CURRENT_STATE_ID;
   }
@@ -399,7 +405,7 @@ public abstract class INodeReference extends INode {
       INodeReferenceValidation.add(this, WithCount.class);
     }
 
-    private String getCountDetails() {
+    public String getCountDetails() {
      final StringBuilder b = new StringBuilder("[");
      if (!withNameList.isEmpty()) {
        final Iterator i = withNameList.iterator();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
index c76bea090f1..45f4d86edd4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.server.namenode.visitor.NamespaceVisitor;
 
 /**
  * An {@link INode} representing a symbolic link.
@@ -104,7 +105,13 @@ public class INodeSymlink extends INodeWithAdditionalFields {
   public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
       final int snapshot) {
     super.dumpTreeRecursively(out, prefix, snapshot);
-    out.println();
+    out.print(" ~> ");
+    out.println(getSymlinkString());
+  }
+
+  @Override
+  public void accept(NamespaceVisitor visitor, int snapshot) {
+    visitor.visitSymlink(this, snapshot);
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
index b7d2f2c1e5a..e0c6c6422ff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
@@ -178,7 +178,7 @@ public abstract class INodeWithAdditionalFields extends INode
   }
 
   @Override
-  final PermissionStatus getPermissionStatus(int snapshotId) {
+  public final PermissionStatus getPermissionStatus(int snapshotId) {
     return new PermissionStatus(getUserName(snapshotId),
         getGroupName(snapshotId), getFsPermission(snapshotId));
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index f76738ffa29..b8f7b65ea7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -166,7 +166,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
       this.isSnapshotRoot = true;
     }
 
-    boolean isSnapshotRoot() {
+    public boolean isSnapshotRoot() {
       return isSnapshotRoot;
     }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/visitor/NamespacePrintVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/visitor/NamespacePrintVisitor.java
new file mode 100644
index 00000000000..0294e62bbdf
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/visitor/NamespacePrintVisitor.java
@@ -0,0 +1,227 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.visitor;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.namenode.DirectoryWithQuotaFeature;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+import org.apache.hadoop.hdfs.server.namenode.INodeReference;
+import org.apache.hadoop.hdfs.server.namenode.INodeSymlink;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+
+/**
+ * To print the namespace tree recursively for testing.
+ *
+ *  \- foo (INodeDirectory@33dd2717)
+ *    \- sub1 (INodeDirectory@442172)
+ *      +- file1 (INodeFile@78392d4)
+ *      +- file2 (INodeFile@78392d5)
+ *      +- sub11 (INodeDirectory@8400cff)
+ *        \- file3 (INodeFile@78392d6)
+ *      \- z_file4 (INodeFile@45848712)
+ */
+public class NamespacePrintVisitor implements NamespaceVisitor {
+  static final String NON_LAST_ITEM = "+-";
+  static final String LAST_ITEM = "\\-";
+
+  /** Print the tree from the given root to a {@link File}. */
+  public static void print2File(INode root, File f) throws IOException {
+    try(final PrintWriter out = new PrintWriter(new FileWriter(f), true)) {
+      new NamespacePrintVisitor(out).print(root);
+    }
+  }
+
+  /** @return string of the tree in the given {@link FSNamesystem}. */
+  public static String print2Sting(FSNamesystem ns) {
+    return print2Sting(ns.getFSDirectory().getRoot());
+  }
+
+  /** @return string of the tree from the given root. */
+  public static String print2Sting(INode root) {
+    final StringWriter out = new StringWriter();
+    new NamespacePrintVisitor(new PrintWriter(out)).print(root);
+    return out.getBuffer().toString();
+  }
+
+  /**
+   * Print the tree in the given {@link FSNamesystem}
+   * to the given {@link PrintStream}.
+   */
+  public static void print(FSNamesystem ns, PrintStream out) {
+    new NamespacePrintVisitor(new PrintWriter(out)).print(ns);
+  }
+
+  private final PrintWriter out;
+  private final StringBuffer prefix = new StringBuffer();
+
+  private NamespacePrintVisitor(PrintWriter out) {
+    this.out = out;
+  }
+
+  private void print(FSNamesystem namesystem) {
+    print(namesystem.getFSDirectory().getRoot());
+  }
+
+  private void print(INode root) {
+    root.accept(this, Snapshot.CURRENT_STATE_ID);
+  }
+
+  private void printINode(INode iNode, int snapshot) {
+    out.print(prefix);
+    out.print(" ");
+    final String name = iNode.getLocalName();
+    out.print(name != null && name.isEmpty()? "/": name);
+    out.print(" (");
+    out.print(iNode.getObjectString());
+    out.print("), ");
+    out.print(iNode.getParentString());
+    out.print(", " + iNode.getPermissionStatus(snapshot));
+  }
+
+  @Override
+  public void visitFile(INodeFile file, int snapshot) {
+    printINode(file, snapshot);
+
+    out.print(", fileSize=" + file.computeFileSize(snapshot));
+    // print only the first block, if it exists
+    out.print(", blocks=");
+    final BlockInfo[] blocks = file.getBlocks();
+    out.print(blocks.length == 0 ? null: blocks[0]);
+    out.println();
+
+    final FileWithSnapshotFeature snapshotFeature
+        = file.getFileWithSnapshotFeature();
+    if (snapshotFeature != null) {
+      if (prefix.length() >= 2) {
+        prefix.setLength(prefix.length() - 2);
+        prefix.append(" ");
+      }
+      out.print(prefix);
+      out.print(snapshotFeature);
+    }
+    out.println();
+  }
+
+  @Override
+  public void visitSymlink(INodeSymlink symlink, int snapshot) {
+    printINode(symlink, snapshot);
+    out.print(" ~> ");
+    out.println(symlink.getSymlinkString());
+  }
+
+  @Override
+  public void visitReference(INodeReference ref, int snapshot) {
+    printINode(ref, snapshot);
+
+    if (ref instanceof INodeReference.DstReference) {
+      out.print(", dstSnapshotId=" + ref.getDstSnapshotId());
+    } else if (ref instanceof INodeReference.WithCount) {
+      out.print(", " + ((INodeReference.WithCount)ref).getCountDetails());
+    }
+    out.println();
+  }
+
+  @Override
+  public void preVisitReferred(INode referred) {
+    prefix.setLength(prefix.length() - 2);
+    prefix.append(" ->");
+  }
+
+  @Override
+  public void postVisitReferred(INode referred) {
+    prefix.setLength(prefix.length() - 2);
+  }
+
+  @Override
+  public void visitDirectory(INodeDirectory dir, int snapshot) {
+    printINode(dir, snapshot);
+
+    out.print(", childrenSize=" + dir.getChildrenList(snapshot).size());
+    final DirectoryWithQuotaFeature q = dir.getDirectoryWithQuotaFeature();
+    if (q != null) {
+      out.print(", " + q);
+    }
+    if (dir instanceof Snapshot.Root) {
+      out.print(", snapshotId=" + snapshot);
+    }
+    out.println();
+
+    if (prefix.length() >= 2) {
+      prefix.setLength(prefix.length() - 2);
+      prefix.append(" ");
+    }
+
+    final DirectoryWithSnapshotFeature snapshotFeature
+        = dir.getDirectoryWithSnapshotFeature();
+    if (snapshotFeature != null) {
+      out.print(prefix);
+      out.print(snapshotFeature);
+    }
+    out.println();
+  }
+
+  @Override
+  public void visitSnapshottable(INodeDirectory dir,
+      DirectorySnapshottableFeature snapshottable) {
+    out.println();
+    out.print(prefix);
+
+    out.print("Snapshot of ");
+    final String name = dir.getLocalName();
+    out.print(name != null && name.isEmpty()? "/": name);
+    out.print(": quota=");
+    out.print(snapshottable.getSnapshotQuota());
+
+    int n = 0;
+    for(DirectoryDiff diff : snapshottable.getDiffs()) {
+      if (diff.isSnapshotRoot()) {
+        n++;
+      }
+    }
+    final int numSnapshots = snapshottable.getNumSnapshots();
+    Preconditions.checkState(n == numSnapshots,
+        "numSnapshots = " + numSnapshots + " != " + n);
+    out.print(", #snapshot=");
+    out.println(n);
+  }
+
+  @Override
+  public void preVisitSub(Element sub, int index, boolean isLast) {
+    prefix.append(isLast? LAST_ITEM : NON_LAST_ITEM);
+  }
+
+  @Override
+  public void postVisitSub(Element sub, int index, boolean isLast) {
+    prefix.setLength(prefix.length() - 2);
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/visitor/NamespaceVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/visitor/NamespaceVisitor.java
new file mode 100644
index 00000000000..2360b59373b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/visitor/NamespaceVisitor.java
@@ -0,0 +1,243 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.visitor;
+
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+import org.apache.hadoop.hdfs.server.namenode.INodeReference;
+import org.apache.hadoop.hdfs.server.namenode.INodeSymlink;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+
+import java.util.Iterator;
+
+/**
+ * For visiting namespace trees.
+ */
+public interface NamespaceVisitor {
+  /** For visiting any {@link INode}. */
+  interface INodeVisitor {
+    INodeVisitor DEFAULT = new INodeVisitor() {};
+
+    /** Visiting the given {@link INode}. */
+    default void visit(INode iNode, int snapshot) {
+    }
+  }
+
+  /** @return the default (non-recursive) {@link INodeVisitor}. */
+  default INodeVisitor getDefaultVisitor() {
+    return INodeVisitor.DEFAULT;
+  }
+
+  /** Visiting the given {@link INodeFile}. */
+  default void visitFile(INodeFile file, int snapshot) {
+    getDefaultVisitor().visit(file, snapshot);
+  }
+
+  /** Visiting the given {@link INodeSymlink}. */
+  default void visitSymlink(INodeSymlink symlink, int snapshot) {
+    getDefaultVisitor().visit(symlink, snapshot);
+  }
+
+  /** Visiting the given {@link INodeReference} (non-recursively). */
+  default void visitReference(INodeReference ref, int snapshot) {
+    getDefaultVisitor().visit(ref, snapshot);
+  }
+
+  /** First visit the given {@link INodeReference} and then the referred. */
+  default void visitReferenceRecursively(INodeReference ref, int snapshot) {
+    visitReference(ref, snapshot);
+
+    final INode referred = ref.getReferredINode();
+    preVisitReferred(referred);
+    referred.accept(this, snapshot);
+    postVisitReferred(referred);
+  }
+
+  /** Right before visiting the given referred {@link INode}. */
+  default void preVisitReferred(INode referred) {
+  }
+
+  /** Right after visiting the given referred {@link INode}. */
+  default void postVisitReferred(INode referred) {
+  }
+
+  /** Visiting the given {@link INodeDirectory} (non-recursively). */
+  default void visitDirectory(INodeDirectory dir, int snapshot) {
+    getDefaultVisitor().visit(dir, snapshot);
+  }
+
+  /**
+   * First visit the given {@link INodeDirectory};
+   * then the children;
+   * and then, if snapshottable, the snapshots. */
+  default void visitDirectoryRecursively(INodeDirectory dir, int snapshot) {
+    visitDirectory(dir, snapshot);
+    visitSubs(getChildren(dir, snapshot));
+
+    if (snapshot == Snapshot.CURRENT_STATE_ID) {
+      final DirectorySnapshottableFeature snapshottable
+          = dir.getDirectorySnapshottableFeature();
+      if (snapshottable != null) {
+        visitSnapshottable(dir, snapshottable);
+        visitSubs(getSnapshots(snapshottable));
+      }
+    }
+  }
+
+  /**
+   * Right before visiting the given sub {@link Element}.
+   * The sub element may be a child of an {@link INodeDirectory}
+   * or a snapshot in {@link DirectorySnapshottableFeature}.
+   *
+   * @param sub the element to be visited.
+   * @param index the index of the sub element.
+   * @param isLast is the sub element the last element?
+   */
+  default void preVisitSub(Element sub, int index, boolean isLast) {
+  }
+
+  /**
+   * Right after visiting the given sub {@link Element}.
+   * The sub element may be a child of an {@link INodeDirectory}
+   * or a snapshot in {@link DirectorySnapshottableFeature}.
+   *
+   * @param sub the element just visited.
+   * @param index the index of the sub element.
+   * @param isLast is the sub element the last element?
+   */
+  default void postVisitSub(Element sub, int index, boolean isLast) {
+  }
+
+  /** Visiting a {@link DirectorySnapshottableFeature}. */
+  default void visitSnapshottable(INodeDirectory dir,
+      DirectorySnapshottableFeature snapshottable) {
+  }
+
+  /**
+   * Visiting the sub {@link Element}s recursively.
+   *
+   * @param subs the children of an {@link INodeDirectory}
+   *             or the snapshots in {@link DirectorySnapshottableFeature}.
+   */
+  default void visitSubs(Iterable<Element> subs) {
+    if (subs == null) {
+      return;
+    }
+    int index = 0;
+    for(final Iterator<Element> i = subs.iterator(); i.hasNext();) {
+      final Element e = i.next();
+      final boolean isLast = !i.hasNext();
+      preVisitSub(e, index, isLast);
+      e.getInode().accept(this, e.getSnapshotId());
+      postVisitSub(e, index, isLast);
+      index++;
+    }
+  }
+
+  /** @return the children as {@link Element}s. */
+  static Iterable<Element> getChildren(INodeDirectory dir, int snapshot) {
+    final Iterator<INode> i = dir.getChildrenList(snapshot).iterator();
+    return new Iterable<Element>() {
+      @Override
+      public Iterator<Element> iterator() {
+        return new Iterator<Element>() {
+          @Override
+          public boolean hasNext() {
+            return i.hasNext();
+          }
+
+          @Override
+          public Element next() {
+            return new Element(snapshot, i.next());
+          }
+
+          @Override
+          public void remove() {
+            throw new UnsupportedOperationException();
+          }
+        };
+      }
+    };
+  }
+
+  /** @return the snapshots as {@link Element}s. */
+  static Iterable<Element> getSnapshots(
+      DirectorySnapshottableFeature snapshottable) {
+    final Iterator<DirectoryWithSnapshotFeature.DirectoryDiff> i
+        = snapshottable.getDiffs().iterator();
+    return new Iterable<Element>() {
+      @Override
+      public Iterator<Element> iterator() {
+        return new Iterator<Element>() {
+          private DirectoryWithSnapshotFeature.DirectoryDiff next = findNext();
+
+          private DirectoryWithSnapshotFeature.DirectoryDiff findNext() {
+            for(; i.hasNext();) {
+              final DirectoryWithSnapshotFeature.DirectoryDiff diff = i.next();
+              if (diff.isSnapshotRoot()) {
+                return diff;
+              }
+            }
+            return null;
+          }
+
+          @Override
+          public boolean hasNext() {
+            return next != null;
+          }
+
+          @Override
+          public Element next() {
+            final int id = next.getSnapshotId();
+            final Element e = new Element(id,
+                snapshottable.getSnapshotById(id).getRoot());
+            next = findNext();
+            return e;
+          }
+
+          @Override
+          public void remove() {
+            throw new UnsupportedOperationException();
+          }
+        };
+      }
+    };
+  }
+
+  /** Snapshot and INode. */
+  class Element {
+    private final int snapshotId;
+    private final INode inode;
+
+    Element(int snapshot, INode inode) {
+      this.snapshotId = snapshot;
+      this.inode = inode;
+    }
+
+    INode getInode() {
+      return inode;
+    }
+
+    int getSnapshotId() {
+      return snapshotId;
+    }
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/visitor/package.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/visitor/package.html
new file mode 100644
index 00000000000..f25748136d6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/visitor/package.html
@@ -0,0 +1,21 @@
+<html>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one
+   or more contributor license agreements.  See the NOTICE file
+   distributed with this work for additional information
+   regarding copyright ownership.  The ASF licenses this file
+   to you under the Apache License, Version 2.0 (the
+   "License"); you may not use this file except in compliance
+   with the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<body>
+Use the visitor pattern to visit the namespace tree.
+</body>
+</html>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
index 3fd725b7f69..de527f0bff7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.DiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
+import org.apache.hadoop.hdfs.server.namenode.visitor.NamespacePrintVisitor;
 import org.apache.hadoop.hdfs.util.Canceler;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
@@ -607,6 +608,9 @@ public class TestFSImageWithSnapshot {
     final String b =
         fsn.getFSDirectory().getINode("/").dumpTreeRecursively().toString();
     output.println(b);
+
+    final String s = NamespacePrintVisitor.print2Sting(fsn);
+    Assert.assertEquals(b, s);
     return b;
   }
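
Note for reviewers (not part of the patch): the sketch below shows how a caller might implement the new NamespaceVisitor interface. The class name NamespaceCountVisitor and the counting logic are hypothetical; only the per-node callbacks are overridden, while the recursion is driven by the patched INode.accept(...) methods together with the interface's default visitDirectoryRecursively/visitSubs implementations.

// Hypothetical example, not included in this change: count files and
// directories by overriding only the leaf callbacks of NamespaceVisitor.
package org.apache.hadoop.hdfs.server.namenode.visitor;

import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;

public class NamespaceCountVisitor implements NamespaceVisitor {
  private int files = 0;
  private int directories = 0;

  @Override
  public void visitFile(INodeFile file, int snapshot) {
    files++;  // invoked once for each file reached by the traversal
  }

  @Override
  public void visitDirectory(INodeDirectory dir, int snapshot) {
    directories++;  // invoked before the directory's children are visited
  }

  public int getFileCount() {
    return files;
  }

  public int getDirectoryCount() {
    return directories;
  }
}

A caller would start the traversal from the root in the current state, for example fsn.getFSDirectory().getRoot().accept(new NamespaceCountVisitor(), Snapshot.CURRENT_STATE_ID); because the default visitDirectoryRecursively also descends into snapshots of snapshottable directories, the resulting counts include snapshot copies as well.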