From 099762a0bc960066f8157fdd1e495b6752a6f802 Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Tue, 13 Nov 2012 19:59:55 +0000 Subject: [PATCH] HDFS-4177. Add a snapshot parameter to INodeDirectory.getChildrenList() for selecting particular snapshot children list views. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1408923 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES.HDFS-2802.txt | 3 + .../hdfs/server/namenode/FSDirectory.java | 50 ++-- .../hdfs/server/namenode/FSImageFormat.java | 7 +- .../server/namenode/FSPermissionChecker.java | 12 +- .../hadoop/hdfs/server/namenode/INode.java | 7 +- .../hdfs/server/namenode/INodeDirectory.java | 85 ++++--- .../namenode/snapshot/SnapshotManager.java | 7 +- .../apache/hadoop/hdfs/util/ReadOnlyList.java | 222 ++++++++++++++++++ .../namenode/TestSnapshotPathINodes.java | 3 +- 9 files changed, 316 insertions(+), 80 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ReadOnlyList.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt index 3e43d3faf1c..7807519b8c7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt @@ -64,3 +64,6 @@ Branch-2802 Snapshot (Unreleased) and has snapshots. (Jing Zhao via szetszwo) HDFS-4170. Add snapshot information to INodesInPath. (szetszwo) + + HDFS-4177. Add a snapshot parameter to INodeDirectory.getChildrenList() for + selecting particular snapshot children list views. (szetszwo) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index af5831ce7b5..e8fbfde76d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -23,7 +23,6 @@ import java.io.Closeable; import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; -import java.util.List; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -60,7 +59,9 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable; +import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.hdfs.util.ByteArray; +import org.apache.hadoop.hdfs.util.ReadOnlyList; import com.google.common.base.Preconditions; @@ -696,14 +697,15 @@ public class FSDirectory implements Closeable { + error); throw new FileAlreadyExistsException(error); } - List children = dstInode.isDirectory() ? 
- ((INodeDirectory) dstInode).getChildren() : null; - if (children != null && children.size() != 0) { - error = "rename cannot overwrite non empty destination directory " - + dst; - NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " - + error); - throw new IOException(error); + if (dstInode.isDirectory()) { + final ReadOnlyList children = ((INodeDirectory) dstInode + ).getChildrenList(dstInodesInPath.getPathSnapshot()); + if (!children.isEmpty()) { + error = "rename destination directory is not empty: " + dst; + NameNode.stateChangeLog.warn( + "DIR* FSDirectory.unprotectedRenameTo: " + error); + throw new IOException(error); + } } INode snapshotNode = hasSnapshot(dstInode); if (snapshotNode != null) { @@ -1072,12 +1074,14 @@ public class FSDirectory implements Closeable { boolean isNonEmptyDirectory(String path) throws UnresolvedLinkException { readLock(); try { - final INode inode = rootDir.getNode(path, false); + final INodesInPath inodesInPath = rootDir.getINodesInPath(path, false); + final INode inode = inodesInPath.getINode(0); if (inode == null || !inode.isDirectory()) { //not found or not a directory return false; } - return ((INodeDirectory)inode).getChildrenList().size() != 0; + final Snapshot s = inodesInPath.getPathSnapshot(); + return !((INodeDirectory)inode).getChildrenList(s).isEmpty(); } finally { readUnlock(); } @@ -1155,13 +1159,10 @@ public class FSDirectory implements Closeable { && ((INodeDirectorySnapshottable) targetDir).getNumSnapshots() > 0) { return target; } - List children = targetDir.getChildren(); - if (children != null) { - for (INode child : children) { - INode snapshotDir = hasSnapshot(child); - if (snapshotDir != null) { - return snapshotDir; - } + for (INode child : targetDir.getChildrenList(null)) { + INode snapshotDir = hasSnapshot(child); + if (snapshotDir != null) { + return snapshotDir; } } } @@ -1195,7 +1196,7 @@ public class FSDirectory implements Closeable { replaceINodeUnsynced(path, oldnode, newnode); //update children's parent directory - for(INode i : newnode.getChildrenList()) { + for(INode i : newnode.getChildrenList(null)) { i.parent = newnode; } } finally { @@ -1239,7 +1240,8 @@ public class FSDirectory implements Closeable { readLock(); try { - INode targetNode = rootDir.getNode(srcs, true); + final INodesInPath inodesInPath = rootDir.getINodesInPath(srcs, true); + final INode targetNode = inodesInPath.getINode(0); if (targetNode == null) return null; @@ -1248,8 +1250,10 @@ public class FSDirectory implements Closeable { new HdfsFileStatus[]{createFileStatus(HdfsFileStatus.EMPTY_NAME, targetNode, needLocation)}, 0); } + INodeDirectory dirInode = (INodeDirectory)targetNode; - List contents = dirInode.getChildrenList(); + final ReadOnlyList contents = dirInode.getChildrenList( + inodesInPath.getPathSnapshot()); int startChild = dirInode.nextChild(startAfter); int totalNumChildren = contents.size(); int numOfListing = Math.min(totalNumChildren-startChild, this.lsLimit); @@ -1738,7 +1742,7 @@ public class FSDirectory implements Closeable { } if (maxDirItems != 0) { INodeDirectory parent = (INodeDirectory)pathComponents[pos-1]; - int count = parent.getChildrenList().size(); + int count = parent.getChildrenList(null).size(); if (count >= maxDirItems) { throw new MaxDirectoryItemsExceededException(maxDirItems, count); } @@ -1881,7 +1885,7 @@ public class FSDirectory implements Closeable { * INode. using 'parent' is not currently recommended. 
*/
nodesInPath.add(dir);
- for (INode child : dir.getChildrenList()) {
+ for (INode child : dir.getChildrenList(null)) {
if (child.isDirectory()) {
updateCountForINodeWithQuota((INodeDirectory)child, counts, nodesInPath);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index 62d9a361478..6aa009d36c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -30,7 +30,6 @@ import java.security.DigestInputStream;
import java.security.DigestOutputStream;
import java.security.MessageDigest;
import java.util.Arrays;
-import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -44,6 +43,7 @@ import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.io.Text;
@@ -530,9 +530,10 @@ class FSImageFormat {
private void saveImage(ByteBuffer currentDirName, INodeDirectory current, DataOutputStream out) throws IOException {
- List<INode> children = current.getChildren();
- if (children == null || children.isEmpty())
+ final ReadOnlyList<INode> children = current.getChildrenList(null);
+ if (children.isEmpty()) {
return;
+ }
// print prefix (parent directory name)
int prefixLen = currentDirName.position();
if (prefixLen == 0) { // root
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index 91ebc968a04..3031190ea1d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -28,6 +28,8 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
@@ -121,7 +123,8 @@ class FSPermissionChecker {
}
// check if (parentAccess != null) && file exists, then check sb
// Resolve symlinks, the check is performed on the link target.
- final INode[] inodes = root.getExistingPathINodes(path, true).getINodes();
+ final INodesInPath inodesInPath = root.getExistingPathINodes(path, true);
+ final INode[] inodes = inodesInPath.getINodes();
int ancestorIndex = inodes.length - 2;
for(; ancestorIndex >= 0 && inodes[ancestorIndex] == null; ancestorIndex--);
@@ -141,7 +144,8 @@ class FSPermissionChecker {
check(inodes[inodes.length - 1], access);
}
if (subAccess != null) {
- checkSubAccess(inodes[inodes.length - 1], subAccess);
+ final Snapshot s = inodesInPath.getPathSnapshot();
+ checkSubAccess(inodes[inodes.length - 1], s, subAccess);
}
if (doCheckOwner) {
checkOwner(inodes[inodes.length - 1]);
@@ -162,7 +166,7 @@ class FSPermissionChecker {
}
}
- private void checkSubAccess(INode inode, FsAction access
+ private void checkSubAccess(INode inode, Snapshot snapshot, FsAction access
) throws AccessControlException {
if (inode == null || !inode.isDirectory()) {
return;
@@ -173,7 +177,7 @@ class FSPermissionChecker {
INodeDirectory d = directories.pop();
check(d, access);
- for(INode child : d.getChildrenList()) {
+ for(INode child : d.getChildrenList(snapshot)) {
if (child.isDirectory()) {
directories.push((INodeDirectory)child);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index c9588b9c549..96614cf63eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
import org.apache.hadoop.util.StringUtils;
import com.google.common.annotations.VisibleForTesting;
@@ -48,7 +49,11 @@ import com.google.common.primitives.SignedBytes;
*/
@InterfaceAudience.Private
public abstract class INode implements Comparable<byte[]> {
- static final List<INode> EMPTY_LIST = Collections.unmodifiableList(new ArrayList<INode>());
+ static final List<INode> EMPTY_LIST
+ = Collections.unmodifiableList(new ArrayList<INode>());
+ static final ReadOnlyList<INode> EMPTY_READ_ONLY_LIST
+ = ReadOnlyList.Util.asReadOnlyList(EMPTY_LIST);
+
/**
* The inode name is in java UTF8 encoding;
* The name in HdfsFileStatus should keep the same encoding as this.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 6458a2ceb68..e5153e711d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
import com.google.common.annotations.VisibleForTesting;
@@ -55,16 +56,14 @@ public class INodeDirectory extends INode {
protected static final int DEFAULT_FILES_PER_DIRECTORY = 5;
final static String ROOT_NAME = "";
- private List<INode> children;
+ private List<INode> children = null;
public INodeDirectory(String name, PermissionStatus permissions) {
super(name, permissions);
- this.children = null;
}
public INodeDirectory(PermissionStatus permissions, long mTime) {
super(permissions, mTime, 0);
- this.children = null;
}
/** constructor */
@@ -79,7 +78,7 @@ public class INodeDirectory extends INode {
*/
public INodeDirectory(INodeDirectory other) {
super(other);
- this.children = other.getChildren();
+ this.children = other.children;
}
/** @return true unconditionally. */
@@ -118,39 +117,23 @@ public class INodeDirectory extends INode {
throw new IllegalArgumentException("No child exists to be replaced");
}
}
-
- INode getChild(String name) {
- return getChildINode(DFSUtil.string2Bytes(name));
+
+ private INode getChild(byte[] name, Snapshot snapshot) {
+ final ReadOnlyList<INode> c = getChildrenList(snapshot);
+ final int i = ReadOnlyList.Util.binarySearch(c, name);
+ return i < 0? null: c.get(i);
}
- private INode getChildINode(byte[] name) {
- if (children == null) {
- return null;
- }
- int low = Collections.binarySearch(children, name);
- if (low >= 0) {
- return children.get(low);
- }
- return null;
- }
-
- /**
- * @return the INode of the last component in components, or null if the last
- * component does not exist.
- */
- private INode getNode(byte[][] components, boolean resolveLink
+ /** @return the {@link INodesInPath} containing only the last inode. */
+ INodesInPath getINodesInPath(String path, boolean resolveLink
) throws UnresolvedLinkException {
- INodesInPath inodesInPath = getExistingPathINodes(components, 1,
- resolveLink);
- return inodesInPath.inodes[0];
+ return getExistingPathINodes(getPathComponents(path), 1, resolveLink);
}
- /**
- * This is the external interface
- */
+ /** @return the last inode in the path. */
INode getNode(String path, boolean resolveLink) throws UnresolvedLinkException {
- return getNode(getPathComponents(path), resolveLink);
+ return getINodesInPath(path, resolveLink).getINode(0);
}
/**
@@ -269,7 +252,8 @@ public class INodeDirectory extends INode {
}
} else {
// normal case, and also for resolving file/dir under snapshot root
- curNode = parentDir.getChildINode(components[count + 1]);
+ curNode = parentDir.getChild(components[count + 1],
+ existing.getPathSnapshot());
}
count++;
index++;
@@ -470,16 +454,14 @@ public class INodeDirectory extends INode {
}
/**
- * @return an empty list if the children list is null;
- * otherwise, return the children list.
- * The returned list should not be modified.
+ * @return the current children list if the specified snapshot is null;
+ * otherwise, return the children list corresponding to the snapshot.
+ * Note that the returned list is never null.
*/
- public List<INode> getChildrenList() {
- return children==null ? EMPTY_LIST : children;
- }
- /** @return the children list which is possibly null. */
- public List<INode> getChildren() {
- return children;
+ public ReadOnlyList<INode> getChildrenList(final Snapshot snapshot) {
+ //TODO: use snapshot to select children list
+ return children == null ? EMPTY_READ_ONLY_LIST
+ : ReadOnlyList.Util.asReadOnlyList(children);
}
/** Set the children list. */
public void setChildren(List<INode> children) {
@@ -545,11 +527,19 @@ public class INodeDirectory extends INode {
}
/**
- * @return the snapshot associated to the path.
- * @see #snapshot
+ * For non-snapshot paths, return the latest snapshot found in the path.
+ * For snapshot paths, return null.
*/
- public Snapshot getSnapshot() {
- return snapshot;
+ public Snapshot getLatestSnapshot() {
+ return isSnapshot? null: snapshot;
+ }
+
+ /**
+ * For snapshot paths, return the snapshot specified in the path.
+ * For non-snapshot paths, return null.
+ */
+ public Snapshot getPathSnapshot() {
+ return isSnapshot? snapshot: null;
}
private void setSnapshot(Snapshot s) {
@@ -576,6 +566,11 @@ public class INodeDirectory extends INode {
return inodes;
}
+ /** @return the i-th inode. */
+ INode getINode(int i) {
+ return inodes[i];
+ }
+
/**
* @return index of the {@link INodeDirectoryWithSnapshot} in
* {@link #inodes} for snapshot path, else -1.
@@ -626,7 +621,7 @@ public class INodeDirectory extends INode {
for(int i = 1; i < inodes.length; i++) {
b.append(", ").append(toString(inodes[i]));
}
- b.append("]");
+ b.append("], length=").append(inodes.length);
}
b.append("\n numNonNull = ").append(numNonNull)
.append("\n capacity = ").append(capacity)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
index 6cd07c60c89..91983d0befd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
import org.apache.hadoop.hdfs.server.namenode.INodeSymlink;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
/** Manage snapshottable directories and their snapshots. */
public class SnapshotManager implements SnapshotStats {
@@ -124,10 +125,10 @@ public class SnapshotManager implements SnapshotStats {
/** Process snapshot creation recursively. */
private void processRecursively(final INodeDirectory srcDir, final INodeDirectory dstDir) throws IOException {
- final List<INode> children = srcDir.getChildren();
- if (children != null) {
+ final ReadOnlyList<INode> children = srcDir.getChildrenList(null);
+ if (!children.isEmpty()) {
final List<INode> inodes = new ArrayList<INode>(children.size());
- for(final INode c : new ArrayList<INode>(children)) {
+ for(final INode c : new ArrayList<INode>(ReadOnlyList.Util.asList(children))) {
final INode i;
if (c == null) {
i = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ReadOnlyList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ReadOnlyList.java
new file mode 100644
index 00000000000..c7f36da3214
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ReadOnlyList.java
@@ -0,0 +1,222 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.util;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.ListIterator;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * A {@link ReadOnlyList} is an unmodifiable list,
+ * which supports read-only operations.
+ *
+ * @param <E> The type of the list elements.
+ */
+@InterfaceAudience.Private
+public interface ReadOnlyList<E> extends Iterable<E> {
+ /**
+ * Is this an empty list?
+ */
+ boolean isEmpty();
+
+ /**
+ * @return the size of this list.
+ */
+ int size();
+
+ /**
+ * @return the i-th element.
+ */
+ E get(int i);
+
+ /**
+ * Utilities for {@link ReadOnlyList}
+ */
+ public static class Util {
+ /**
+ * The same as {@link Collections#binarySearch(List, Object)}
+ * except that the list is a {@link ReadOnlyList}.
+ *
+ * @return the insertion point defined
+ * in {@link Collections#binarySearch(List, Object)}.
+ */
+ public static <K, E extends Comparable<K>> int binarySearch(
+ final ReadOnlyList<E> list, final K key) {
+ return Collections.binarySearch(asList(list), key);
+ }
+
+ /**
+ * @return a {@link ReadOnlyList} view of the given list.
+ */
+ public static <E> ReadOnlyList<E> asReadOnlyList(final List<E> list) {
+ return new ReadOnlyList<E>() {
+ @Override
+ public Iterator<E> iterator() {
+ return list.iterator();
+ }
+
+ @Override
+ public boolean isEmpty() {
+ return list.isEmpty();
+ }
+
+ @Override
+ public int size() {
+ return list.size();
+ }
+
+ @Override
+ public E get(int i) {
+ return list.get(i);
+ }
+ };
+ }
+
+ /**
+ * @return a {@link List} view of the given list.
+ */
+ public static <E> List<E> asList(final ReadOnlyList<E> list) {
+ return new List<E>() {
+ @Override
+ public Iterator<E> iterator() {
+ return list.iterator();
+ }
+
+ @Override
+ public boolean isEmpty() {
+ return list.isEmpty();
+ }
+
+ @Override
+ public int size() {
+ return list.size();
+ }
+
+ @Override
+ public E get(int i) {
+ return list.get(i);
+ }
+
+ @Override
+ public Object[] toArray() {
+ final Object[] a = new Object[size()];
+ for(int i = 0; i < a.length; i++) {
+ a[i] = get(i);
+ }
+ return a;
+ }
+
+ //All methods below are not supported.
+
+ @Override
+ public boolean add(E e) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void add(int index, E element) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean addAll(Collection<? extends E> c) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean addAll(int index, Collection<? extends E> c) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void clear() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean contains(Object o) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean containsAll(Collection<?> c) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int indexOf(Object o) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int lastIndexOf(Object o) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public ListIterator<E> listIterator() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public ListIterator<E> listIterator(int index) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean remove(Object o) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public E remove(int index) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean removeAll(Collection<?> c) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean retainAll(Collection<?> c) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public E set(int index, E element) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public List<E> subList(int fromIndex, int toIndex) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public <T> T[] toArray(T[] a) {
+ throw new UnsupportedOperationException();
+ }
+ };
+ }
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
index 9af9f7483e8..9f32ee1b986 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
@@ -118,7 +118,8 @@ public class TestSnapshotPathINodes {
final Snapshot snapshot, int index) {
assertEquals(isSnapshot, inodesInPath.isSnapshot());
assertEquals(index, inodesInPath.getSnapshotRootIndex());
- assertEquals(snapshot, inodesInPath.getSnapshot());
+ assertEquals(isSnapshot? snapshot: null, inodesInPath.getPathSnapshot());
+ assertEquals(isSnapshot? null: snapshot, inodesInPath.getLatestSnapshot());
}
/**
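For reviewers, here is a small, self-contained usage sketch of the new ReadOnlyList utilities. It is illustrative only and not part of the patch; the ReadOnlyListExample class and its data are made up, and it exercises just asReadOnlyList(), Util.binarySearch() and the unmodifiable List view:

import java.util.Arrays;

import org.apache.hadoop.hdfs.util.ReadOnlyList;

public class ReadOnlyListExample {
  public static void main(String[] args) {
    // Wrap an already-sorted list in the read-only view added by this patch.
    final ReadOnlyList<String> names =
        ReadOnlyList.Util.asReadOnlyList(Arrays.asList("bar", "baz", "foo"));

    // Read-only operations are supported: size(), get(i), isEmpty(), iteration.
    System.out.println(names.size() + " entries, first = " + names.get(0));
    for (String n : names) {
      System.out.println(n);
    }

    // Util.binarySearch mirrors Collections.binarySearch over the view;
    // like directory children in the namenode, the elements must be sorted.
    final int i = ReadOnlyList.Util.binarySearch(names, "baz");
    System.out.println("index of baz = " + i);

    // The List view rejects mutation, which is the point of returning a
    // ReadOnlyList from INodeDirectory.getChildrenList(snapshot).
    try {
      ReadOnlyList.Util.asList(names).add("qux");
    } catch (UnsupportedOperationException e) {
      System.out.println("mutation rejected, as expected");
    }
  }
}

With getChildrenList(null) a caller reads the current children through this read-only view; once snapshot-specific children lists are wired in (see the TODO in INodeDirectory), passing a non-null Snapshot is expected to select that snapshot's view instead.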