HDFS-4118. Change INodeDirectory.getExistingPathINodes(..) to work with snapshots. Contributed by Jing Zhao.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1403959 13f79535-47bb-0310-9956-ffa450edef68
parent 783124488c
commit f3b9963d46

HdfsConstants.java:
@@ -102,4 +102,9 @@ public class HdfsConstants {
    */
   public static final int LAYOUT_VERSION = LayoutVersion
       .getCurrentLayoutVersion();
+
+  /**
+   * A special path component contained in the path for a snapshot file/dir
+   */
+  public static final String DOT_SNAPSHOT_DIR = ".snapshot";
 }
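
The new DOT_SNAPSHOT_DIR constant is the reserved path component that marks snapshot access. As a minimal standalone sketch (not part of the commit; the directory and snapshot name are hypothetical, chosen to match the tests later in this diff), a snapshot path is an ordinary path with ".snapshot/<snapshotName>" spliced in after the snapshottable directory:

public class DotSnapshotPathSketch {
  // same literal value as HdfsConstants.DOT_SNAPSHOT_DIR
  static final String DOT_SNAPSHOT_DIR = ".snapshot";

  public static void main(String[] args) {
    // "/TestSnapshot/sub1" and "s1" are made-up names for illustration.
    String snapshotPath = "/TestSnapshot/sub1/" + DOT_SNAPSHOT_DIR + "/s1/file1";
    System.out.println(snapshotPath); // /TestSnapshot/sub1/.snapshot/s1/file1
  }
}
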
FSDirectory.java:
@@ -300,50 +300,6 @@ public class FSDirectory implements Closeable {
     return newNode;
   }
 
-  /** Add an INodeFileSnapshot to the source file. */
-  INodeFileSnapshot addFileSnapshot(String srcPath, String dstPath
-      ) throws IOException, QuotaExceededException {
-    waitForReady();
-
-    final INodeFile src = INodeFile.valueOf(rootDir.getNode(srcPath, false), srcPath);
-    INodeFileSnapshot snapshot = new INodeFileSnapshot(src, src.computeFileSize(true));
-
-    writeLock();
-    try {
-      //add destination snaplink
-      snapshot = addNode(dstPath, snapshot, UNKNOWN_DISK_SPACE);
-
-      final INodeFileWithLink srcWithLink;
-      if (snapshot != null) {
-        //added snapshot node successfully, check source type,
-        if (src instanceof INodeFileWithLink) {
-          srcWithLink = (INodeFileWithLink)src;
-        } else {
-          //source is an INodeFile, replace the source.
-          srcWithLink = new INodeFileWithLink(src);
-          replaceNode(srcPath, src, srcWithLink);
-        }
-
-        //insert the snapshot to src's linked list.
-        srcWithLink.insert(snapshot);
-      }
-    } finally {
-      writeUnlock();
-
-      if (snapshot == null) {
-        NameNode.stateChangeLog.info(
-            "DIR* FSDirectory.addFileSnapshot: failed to add " + dstPath);
-        return null;
-      }
-    }
-
-    if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* FSDirectory.addFileSnapshot: "
-          + dstPath + " is added to the file system");
-    }
-    return snapshot;
-  }
-
   INodeDirectory addToParent(byte[] src, INodeDirectory parentINode,
       INode newNode, boolean propagateModTime) {
     // NOTE: This does not update space counts for parents
INode.java:
@@ -228,7 +228,7 @@ public abstract class INode implements Comparable<byte[]> {
    * Get local file name
    * @return local file name
    */
-  String getLocalName() {
+  public String getLocalName() {
     return DFSUtil.bytes2String(name);
   }
 
@@ -280,6 +280,11 @@ public abstract class INode implements Comparable<byte[]> {
     return this.parent;
   }
 
+  /** Set parent directory */
+  public void setParent(INodeDirectory parent) {
+    this.parent = parent;
+  }
+
   /**
    * Get last modification time of inode.
    * @return access time
INodeDirectory.java:
@@ -27,7 +27,10 @@ import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshotRoot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
 
 /**
  * Directory INode class.
@@ -190,9 +193,8 @@ public class INodeDirectory extends INode {
    * be thrown when the path refers to a symbolic link.
    * @return the specified number of existing INodes in the path
    */
   INodesInPath getExistingPathINodes(byte[][] components, int numOfINodes,
-      boolean resolveLink)
-      throws UnresolvedLinkException {
+      boolean resolveLink) throws UnresolvedLinkException {
     assert this.compareTo(components[0]) == 0 :
         "Incorrect name " + getLocalName() + " expected "
         + (components[0] == null? null: DFSUtil.bytes2String(components[0]));
@@ -207,7 +209,7 @@ public class INodeDirectory extends INode {
     while (count < components.length && curNode != null) {
       final boolean lastComp = (count == components.length - 1);
       if (index >= 0) {
-        existing.inodes[index] = curNode;
+        existing.addNode(curNode);
       }
       if (curNode.isLink() && (!lastComp || (lastComp && resolveLink))) {
         final String path = constructPath(components, 0, components.length);
@@ -228,13 +230,45 @@ public class INodeDirectory extends INode {
         break;
       }
       INodeDirectory parentDir = (INodeDirectory)curNode;
-      curNode = parentDir.getChildINode(components[count + 1]);
+      // check if the next byte[] in components is for ".snapshot"
+      if (isDotSnapshotDir(components[count + 1])
+          && (curNode instanceof INodeDirectorySnapshottable)) {
+        // skip the ".snapshot" in components
+        count++;
+        index++;
+        existing.isSnapshot = true;
+        if (index >= 0) { // decrease the capacity by 1 to account for .snapshot
+          existing.capacity--;
+        }
+        // check if ".snapshot" is the last element of components
+        if (count == components.length - 1) {
+          return existing;
+        }
+        // Resolve snapshot root
+        curNode = ((INodeDirectorySnapshottable) parentDir)
+            .getSnapshotINode(components[count + 1]);
+        if (index >= -1) {
+          existing.snapshotRootIndex = existing.size;
+        }
+      } else {
+        // normal case, and also for resolving file/dir under snapshot root
+        curNode = parentDir.getChildINode(components[count + 1]);
+      }
       count++;
       index++;
     }
     return existing;
   }
 
+  /**
+   * @return true if path component is {@link HdfsConstants#DOT_SNAPSHOT_DIR}
+   */
+  private static boolean isDotSnapshotDir(byte[] pathComponent) {
+    return pathComponent == null ? false : HdfsConstants.DOT_SNAPSHOT_DIR
+        .equalsIgnoreCase(DFSUtil.bytes2String(pathComponent));
+  }
+
   /**
    * Retrieve the existing INodes along the given path. The first INode
    * always exist and is this INode.
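
To make the bookkeeping above concrete, here is a small standalone sketch (plain Java, not HDFS code) of how skipping the ".snapshot" component plays out: the component count exceeds the number of resolved inodes by exactly one, and the snapshot root lands at the slot where ".snapshot" was skipped.

public class DotSnapshotSkipSketch {
  static final String DOT_SNAPSHOT_DIR = ".snapshot"; // same literal as HdfsConstants

  public static void main(String[] args) {
    // Hypothetical snapshot path; the leading "" mimics the root component
    // that HDFS path splitting produces.
    String[] components = "/sub1/.snapshot/s1/file1".split("/");

    int capacity = components.length;   // slots allocated for inodes
    int size = 0;                       // inodes stored so far
    int snapshotRootIndex = -1;
    boolean isSnapshot = false;

    for (String c : components) {
      if (DOT_SNAPSHOT_DIR.equalsIgnoreCase(c)) {
        // ".snapshot" consumes a path component but no inode slot
        isSnapshot = true;
        capacity--;
        snapshotRootIndex = size; // the next stored inode is the snapshot root
        continue;
      }
      size++; // a real inode would be stored here
    }
    System.out.println("components=" + components.length + " inodes=" + capacity
        + " isSnapshot=" + isSnapshot + " snapshotRootIndex=" + snapshotRootIndex);
    // prints: components=5 inodes=4 isSnapshot=true snapshotRootIndex=2
  }
}
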
@@ -455,18 +489,83 @@ public class INodeDirectory extends INode {
   /**
    * Used by
    * {@link INodeDirectory#getExistingPathINodes(byte[][], int, boolean)}.
-   * Containing INodes information resolved from a given path.
+   * Contains INodes information resolved from a given path.
    */
   static class INodesInPath {
+    /**
+     * Array with the specified number of INodes resolved for a given path.
+     */
     private INode[] inodes;
+    /**
+     * Indicate the number of non-null elements in {@link #inodes}
+     */
+    private int size;
+    /**
+     * The path for a snapshot file/dir contains the .snapshot thus makes the
+     * length of the path components larger the number of inodes. We use
+     * the capacity to control this special case.
+     */
+    private int capacity;
+    /**
+     * true if this path corresponds to a snapshot
+     */
+    private boolean isSnapshot;
+    /**
+     * Index of {@link INodeDirectorySnapshotRoot} for snapshot path, else -1
+     */
+    private int snapshotRootIndex;
 
-    public INodesInPath(int number) {
+    INodesInPath(int number) {
       assert (number >= 0);
-      this.inodes = new INode[number];
+      inodes = new INode[number];
+      capacity = number;
+      size = 0;
+      isSnapshot = false;
+      snapshotRootIndex = -1;
     }
 
-    INode[] getINodes() {
-      return inodes;
+    /**
+     * @return the whole inodes array including the null elements.
+     */
+    INode[] getINodes() {
+      if (capacity < inodes.length) {
+        INode[] newNodes = new INode[capacity];
+        for (int i = 0; i < capacity; i++) {
+          newNodes[i] = inodes[i];
+        }
+        inodes = newNodes;
+      }
+      return inodes;
+    }
+
+    /**
+     * @return index of the {@link INodeDirectorySnapshotRoot} in
+     * {@link #inodes} for snapshot path, else -1.
+     */
+    int getSnapshotRootIndex() {
+      return this.snapshotRootIndex;
+    }
+
+    /**
+     * @return isSnapshot true for a snapshot path
+     */
+    boolean isSnapshot() {
+      return this.isSnapshot;
+    }
+
+    /**
+     * Add an INode at the end of the array
+     */
+    private void addNode(INode node) {
+      assert size < inodes.length;
+      inodes[size++] = node;
+    }
+
+    /**
+     * @return The number of non-null elements
+     */
+    int getSize() {
+      return size;
     }
   }
 }
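
The capacity trick above means the array handed back by getINodes() can be shorter than the array that was allocated. A standalone sketch of just that trimming step (Object stands in for INode; Arrays.copyOf has the same effect as the manual copy loop in the patch):

import java.util.Arrays;

public class TrimSketch {
  public static void main(String[] args) {
    Object[] inodes = new Object[5]; // allocated from numOfINodes = 5
    int capacity = 4;                // decremented once when ".snapshot" was skipped
    if (capacity < inodes.length) {
      inodes = Arrays.copyOf(inodes, capacity); // trim away the unused slot
    }
    System.out.println(inodes.length); // prints 4
  }
}
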
INodeDirectorySnapshottable.java:
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -57,6 +58,18 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithQuota {
   /** A list of snapshots of this directory. */
   private final List<INodeDirectorySnapshotRoot> snapshots
       = new ArrayList<INodeDirectorySnapshotRoot>();
+
+  public INode getSnapshotINode(byte[] name) {
+    if (snapshots == null || snapshots.size() == 0) {
+      return null;
+    }
+    int low = Collections.binarySearch(snapshots, name);
+    if (low >= 0) {
+      return snapshots.get(low);
+    }
+    return null;
+  }
 
   /** Number of snapshots is allowed. */
   private int snapshotQuota;
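
getSnapshotINode() works because the snapshots list is kept sorted by name and its elements compare against a raw byte[] key (INode implements Comparable<byte[]> in HDFS). A standalone sketch of the same lookup pattern, with a stand-in Node class:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SnapshotLookupSketch {
  /** Stand-in for INode, which in HDFS implements Comparable<byte[]>. */
  static class Node implements Comparable<byte[]> {
    final byte[] name;
    Node(String s) { this.name = s.getBytes(); }
    @Override public int compareTo(byte[] other) {
      // unsigned lexicographic byte comparison
      for (int i = 0; i < name.length && i < other.length; i++) {
        int d = (name[i] & 0xff) - (other[i] & 0xff);
        if (d != 0) return d;
      }
      return name.length - other.length;
    }
  }

  public static void main(String[] args) {
    List<Node> snapshots = new ArrayList<>(); // kept sorted by name
    snapshots.add(new Node("s1"));
    snapshots.add(new Node("s2"));
    int i = Collections.binarySearch(snapshots, "s2".getBytes());
    System.out.println(i >= 0 ? "found at " + i : "not found"); // found at 1
  }
}
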
INodeFileWithLink.java:
@@ -37,6 +37,7 @@ public class INodeFileWithLink extends INodeFile {
 
   public INodeFileWithLink(INodeFile f) {
     super(f);
+    setLocalName(f.getLocalName());
     next = this;
   }
 
SnapshotManager.java:
@@ -109,7 +109,7 @@ public class SnapshotManager {
     final List<INode> children = srcDir.getChildren();
     if (children != null) {
       final List<INode> inodes = new ArrayList<INode>(children.size());
-      for(final INode c : children) {
+      for(final INode c : new ArrayList<INode>(children)) {
         final INode i;
         if (c == null) {
           i = null;
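
Iterating over new ArrayList<INode>(children) rather than children itself presumably guards against the children list being modified while it is walked (for example when a child INodeFile gets replaced by an INodeFileWithLink during snapshot creation); structurally modifying a list mid-iteration throws ConcurrentModificationException. A standalone sketch of the failure mode and the defensive-copy fix:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class CopyIterateSketch {
  public static void main(String[] args) {
    List<String> children = new ArrayList<>(Arrays.asList("a", "b"));
    // Iterating `children` directly would throw ConcurrentModificationException
    // on the structural changes below; the copy keeps the loop stable.
    for (String c : new ArrayList<>(children)) {
      int i = children.indexOf(c);
      children.remove(i);        // structural modification of the original...
      children.add(i, c + "'");  // ...is safe because the loop walks the copy
    }
    System.out.println(children); // [a', b']
  }
}
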
@@ -127,6 +127,7 @@ public class SnapshotManager {
         throw new AssertionError("Unknow INode type: " + c.getClass()
             + ", inode = " + c);
       }
+      i.setParent(dstDir);
       inodes.add(i);
     }
     dstDir.setChildren(inodes);
TestSnapshot.java (new file, org.apache.hadoop.hdfs.server.namenode):
@@ -0,0 +1,394 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshotRoot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileSnapshot;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/** Test snapshot related operations. */
+public class TestSnapshot {
+  private static final long seed = 0;
+  private static final short REPLICATION = 3;
+
+  private final Path dir = new Path("/TestSnapshot");
+
+  private final Path sub1 = new Path(dir, "sub1");
+  private final Path file1 = new Path(sub1, "file1");
+  private final Path file2 = new Path(sub1, "file2");
+
+  private Configuration conf;
+  private MiniDFSCluster cluster;
+  private FSNamesystem fsn;
+  private FSDirectory fsdir;
+
+  private DistributedFileSystem hdfs;
+
+  @Before
+  public void setUp() throws Exception {
+    conf = new Configuration();
+    cluster = new MiniDFSCluster.Builder(conf)
+      .numDataNodes(REPLICATION)
+      .build();
+    cluster.waitActive();
+
+    fsn = cluster.getNamesystem();
+    fsdir = fsn.getFSDirectory();
+
+    hdfs = cluster.getFileSystem();
+    DFSTestUtil.createFile(hdfs, file1, 1024, REPLICATION, seed);
+    DFSTestUtil.createFile(hdfs, file2, 1024, REPLICATION, seed);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  /** Test allow-snapshot operation. */
+  @Test
+  public void testAllowSnapshot() throws Exception {
+    final String path = sub1.toString();
+    final INode before = fsdir.getINode(path);
+
+    // Before a directory is snapshottable
+    Assert.assertTrue(before instanceof INodeDirectory);
+    Assert.assertFalse(before instanceof INodeDirectorySnapshottable);
+
+    // After a directory is snapshottable
+    hdfs.allowSnapshot(path);
+    final INode after = fsdir.getINode(path);
+    Assert.assertTrue(after instanceof INodeDirectorySnapshottable);
+  }
+
+  /**
+   * Test {@link INodeDirectory#getExistingPathINodes(byte[][], int, boolean)}
+   * for normal (non-snapshot) file.
+   */
+  @Test
+  public void testNonSnapshotPathINodes() throws Exception {
+    // Get the inodes by resolving the path of a normal file
+    String[] names = INode.getPathNames(file1.toString());
+    byte[][] components = INode.getPathComponents(names);
+    INodesInPath nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
+        components.length, false);
+    INode[] inodes = nodesInPath.getINodes();
+    // The number of inodes should be equal to components.length
+    assertEquals(inodes.length, components.length);
+    // The returned nodesInPath should be non-snapshot
+    assertFalse(nodesInPath.isSnapshot());
+    assertEquals(nodesInPath.getSnapshotRootIndex(), -1);
+    // The last INode should be associated with file1
+    assertEquals(inodes[components.length - 1].getFullPathName(),
+        file1.toString());
+    assertEquals(inodes[components.length - 2].getFullPathName(),
+        sub1.toString());
+    assertEquals(inodes[components.length - 3].getFullPathName(),
+        dir.toString());
+
+    // Call getExistingPathINodes and request only one INode. This is used
+    // when identifying the INode for a given path.
+    nodesInPath = fsdir.rootDir.getExistingPathINodes(components, 1, false);
+    inodes = nodesInPath.getINodes();
+    assertEquals(inodes.length, 1);
+    assertFalse(nodesInPath.isSnapshot());
+    assertEquals(nodesInPath.getSnapshotRootIndex(), -1);
+    assertEquals(inodes[0].getFullPathName(), file1.toString());
+
+    // Call getExistingPathINodes and request 2 INodes. This is usually used
+    // when identifying the parent INode of a given path.
+    nodesInPath = fsdir.rootDir.getExistingPathINodes(components, 2, false);
+    inodes = nodesInPath.getINodes();
+    assertEquals(inodes.length, 2);
+    assertFalse(nodesInPath.isSnapshot());
+    assertEquals(nodesInPath.getSnapshotRootIndex(), -1);
+    assertEquals(inodes[1].getFullPathName(), file1.toString());
+    assertEquals(inodes[0].getFullPathName(), sub1.toString());
+  }
+
+  /**
+   * Test {@link INodeDirectory#getExistingPathINodes(byte[][], int, boolean)}
+   * for snapshot file.
+   */
+  @Test
+  public void testSnapshotPathINodes() throws Exception {
+    // Create a snapshot for the dir, and check the inodes for the path
+    // pointing to a snapshot file
+    hdfs.allowSnapshot(sub1.toString());
+    hdfs.createSnapshot("s1", sub1.toString());
+    // The path when accessing the snapshot file of file1 is
+    // /TestSnapshot/sub1/.snapshot/s1/file1
+    String snapshotPath = sub1.toString() + "/.snapshot/s1/file1";
+    String[] names = INode.getPathNames(snapshotPath);
+    byte[][] components = INode.getPathComponents(names);
+    INodesInPath nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
+        components.length, false);
+    INode[] inodes = nodesInPath.getINodes();
+    // Length of inodes should be (components.length - 1), since we will ignore
+    // ".snapshot"
+    assertEquals(inodes.length, components.length - 1);
+    assertTrue(nodesInPath.isSnapshot());
+    // SnapshotRootIndex should be 3: {root, Testsnapshot, sub1, s1, file1}
+    assertEquals(nodesInPath.getSnapshotRootIndex(), 3);
+    assertTrue(inodes[nodesInPath.getSnapshotRootIndex()] instanceof
+        INodeDirectorySnapshotRoot);
+    // Check the INode for file1 (snapshot file)
+    INode snapshotFileNode = inodes[inodes.length - 1];
+    assertEquals(snapshotFileNode.getLocalName(), file1.getName());
+    assertTrue(snapshotFileNode instanceof INodeFileSnapshot);
+    assertTrue(snapshotFileNode.getParent() instanceof
+        INodeDirectorySnapshotRoot);
+
+    // Call getExistingPathINodes and request only one INode.
+    nodesInPath = fsdir.rootDir.getExistingPathINodes(components, 1, false);
+    inodes = nodesInPath.getINodes();
+    assertEquals(inodes.length, 1);
+    assertTrue(nodesInPath.isSnapshot());
+    // The snapshotroot (s1) is not included in inodes. Thus the
+    // snapshotRootIndex should be -1.
+    assertEquals(nodesInPath.getSnapshotRootIndex(), -1);
+    // Check the INode for file1 (snapshot file)
+    snapshotFileNode = inodes[inodes.length - 1];
+    assertEquals(snapshotFileNode.getLocalName(), file1.getName());
+    assertTrue(snapshotFileNode instanceof INodeFileSnapshot);
+
+    // Call getExistingPathINodes and request 2 INodes.
+    nodesInPath = fsdir.rootDir.getExistingPathINodes(components, 2, false);
+    inodes = nodesInPath.getINodes();
+    assertEquals(inodes.length, 2);
+    assertTrue(nodesInPath.isSnapshot());
+    // There should be two INodes in inodes: s1 and snapshot of file1. Thus the
+    // SnapshotRootIndex should be 0.
+    assertEquals(nodesInPath.getSnapshotRootIndex(), 0);
+    snapshotFileNode = inodes[inodes.length - 1];
+    // Check the INode for snapshot of file1
+    assertEquals(snapshotFileNode.getLocalName(), file1.getName());
+    assertTrue(snapshotFileNode instanceof INodeFileSnapshot);
+
+    // Resolve the path "/TestSnapshot/sub1/.snapshot"
+    String dotSnapshotPath = sub1.toString() + "/.snapshot";
+    names = INode.getPathNames(dotSnapshotPath);
+    components = INode.getPathComponents(names);
+    nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
+        components.length, false);
+    inodes = nodesInPath.getINodes();
+    // The number of INodes returned should be components.length - 1 since we
+    // will ignore ".snapshot"
+    assertEquals(inodes.length, components.length - 1);
+    assertTrue(nodesInPath.isSnapshot());
+    // No SnapshotRoot dir is included in the resolved inodes
+    assertEquals(nodesInPath.getSnapshotRootIndex(), -1);
+    // The last INode should be the INode for sub1
+    assertEquals(inodes[inodes.length - 1].getFullPathName(), sub1.toString());
+    assertFalse(inodes[inodes.length - 1] instanceof INodeFileSnapshot);
+  }
+
+  /**
+   * Test {@link INodeDirectory#getExistingPathINodes(byte[][], int, boolean)}
+   * for snapshot file after deleting the original file.
+   */
+  @Test
+  public void testSnapshotPathINodesAfterDeletion() throws Exception {
+    // Create a snapshot for the dir, and check the inodes for the path
+    // pointing to a snapshot file
+    hdfs.allowSnapshot(sub1.toString());
+    hdfs.createSnapshot("s1", sub1.toString());
+
+    // Delete the original file /TestSnapshot/sub1/file1
+    hdfs.delete(file1, false);
+
+    // Check the INodes for path /TestSnapshot/sub1/file1
+    String[] names = INode.getPathNames(file1.toString());
+    byte[][] components = INode.getPathComponents(names);
+    INodesInPath nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
+        components.length, false);
+    INode[] inodes = nodesInPath.getINodes();
+    // The length of inodes should be equal to components.length
+    assertEquals(inodes.length, components.length);
+    // The number of non-null elements should be components.length - 1 since
+    // file1 has been deleted
+    assertEquals(nodesInPath.getSize(), components.length - 1);
+    // The returned nodesInPath should be non-snapshot
+    assertFalse(nodesInPath.isSnapshot());
+    assertEquals(nodesInPath.getSnapshotRootIndex(), -1);
+    // The last INode should be null, and the one before should be associated
+    // with sub1
+    assertNull(inodes[components.length - 1]);
+    assertEquals(inodes[components.length - 2].getFullPathName(),
+        sub1.toString());
+    assertEquals(inodes[components.length - 3].getFullPathName(),
+        dir.toString());
+
+    // Resolve the path for the snapshot file
+    // /TestSnapshot/sub1/.snapshot/s1/file1
+    String snapshotPath = sub1.toString() + "/.snapshot/s1/file1";
+    names = INode.getPathNames(snapshotPath);
+    components = INode.getPathComponents(names);
+    nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
+        components.length, false);
+    inodes = nodesInPath.getINodes();
+    // Length of inodes should be (components.length - 1), since we will ignore
+    // ".snapshot"
+    assertEquals(inodes.length, components.length - 1);
+    assertTrue(nodesInPath.isSnapshot());
+    // SnapshotRootIndex should be 3: {root, Testsnapshot, sub1, s1, file1}
+    assertEquals(nodesInPath.getSnapshotRootIndex(), 3);
+    assertTrue(inodes[nodesInPath.getSnapshotRootIndex()] instanceof
+        INodeDirectorySnapshotRoot);
+    // Check the INode for file1 (snapshot file)
+    INode snapshotFileNode = inodes[inodes.length - 1];
+    assertEquals(snapshotFileNode.getLocalName(), file1.getName());
+    assertTrue(snapshotFileNode instanceof INodeFileSnapshot);
+    assertTrue(snapshotFileNode.getParent() instanceof
+        INodeDirectorySnapshotRoot);
+  }
+
+  /**
+   * Test {@link INodeDirectory#getExistingPathINodes(byte[][], int, boolean)}
+   * for snapshot file while adding a new file after snapshot.
+   */
+  @Test
+  public void testSnapshotPathINodesWithAddedFile() throws Exception {
+    // Create a snapshot for the dir, and check the inodes for the path
+    // pointing to a snapshot file
+    hdfs.allowSnapshot(sub1.toString());
+    hdfs.createSnapshot("s1", sub1.toString());
+
+    // Add a new file /TestSnapshot/sub1/file3
+    final Path file3 = new Path(sub1, "file3");
+    DFSTestUtil.createFile(hdfs, file3, 1024, REPLICATION, seed);
+
+    // Check the inodes for /TestSnapshot/sub1/file3
+    String[] names = INode.getPathNames(file3.toString());
+    byte[][] components = INode.getPathComponents(names);
+    INodesInPath nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
+        components.length, false);
+    INode[] inodes = nodesInPath.getINodes();
+    // The number of inodes should be equal to components.length
+    assertEquals(inodes.length, components.length);
+    // The returned nodesInPath should be non-snapshot
+    assertFalse(nodesInPath.isSnapshot());
+    assertEquals(nodesInPath.getSnapshotRootIndex(), -1);
+    // The last INode should be associated with file3
+    assertEquals(inodes[components.length - 1].getFullPathName(),
+        file3.toString());
+    assertEquals(inodes[components.length - 2].getFullPathName(),
+        sub1.toString());
+    assertEquals(inodes[components.length - 3].getFullPathName(),
+        dir.toString());
+
+    // Check the inodes for /TestSnapshot/sub1/.snapshot/s1/file3
+    String snapshotPath = sub1.toString() + "/.snapshot/s1/file3";
+    names = INode.getPathNames(snapshotPath);
+    components = INode.getPathComponents(names);
+    nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
+        components.length, false);
+    inodes = nodesInPath.getINodes();
+    // Length of inodes should be (components.length - 1), since we will ignore
+    // ".snapshot"
+    assertEquals(inodes.length, components.length - 1);
+    // The number of non-null inodes should be components.length - 2, since
+    // snapshot of file3 does not exist
+    assertEquals(nodesInPath.getSize(), components.length - 2);
+    assertTrue(nodesInPath.isSnapshot());
+    // SnapshotRootIndex should still be 3: {root, Testsnapshot, sub1, s1, null}
+    assertEquals(nodesInPath.getSnapshotRootIndex(), 3);
+    assertTrue(inodes[nodesInPath.getSnapshotRootIndex()] instanceof
+        INodeDirectorySnapshotRoot);
+    // Check the last INode in inodes, which should be null
+    assertNull(inodes[inodes.length - 1]);
+    assertTrue(inodes[inodes.length - 2] instanceof
+        INodeDirectorySnapshotRoot);
+  }
+
+  /**
+   * Test {@link INodeDirectory#getExistingPathINodes(byte[][], int, boolean)}
+   * for snapshot file while modifying file after snapshot.
+   */
+  @Test
+  public void testSnapshotPathINodesAfterModification() throws Exception {
+    // First check the INode for /TestSnapshot/sub1/file1
+    String[] names = INode.getPathNames(file1.toString());
+    byte[][] components = INode.getPathComponents(names);
+    INodesInPath nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
+        components.length, false);
+    INode[] inodes = nodesInPath.getINodes();
+    // The number of inodes should be equal to components.length
+    assertEquals(inodes.length, components.length);
+    // The last INode should be associated with file1
+    assertEquals(inodes[components.length - 1].getFullPathName(),
+        file1.toString());
+
+    // Create a snapshot for the dir, and check the inodes for the path
+    // pointing to a snapshot file
+    hdfs.allowSnapshot(sub1.toString());
+    hdfs.createSnapshot("s1", sub1.toString());
+
+    // Modify file1
+    DFSTestUtil.appendFile(hdfs, file1, "the content for appending");
+    // Check the INode for /TestSnapshot/sub1/file1 again
+    INodesInPath newNodesInPath = fsdir.rootDir
+        .getExistingPathINodes(components, components.length, false);
+    INode[] newInodes = newNodesInPath.getINodes();
+    // The number of inodes should be equal to components.length
+    assertEquals(newInodes.length, components.length);
+    // The last INode should be associated with file1
+    assertEquals(newInodes[components.length - 1].getFullPathName(),
+        file1.toString());
+    // The modification time of the INode for file3 should have been changed
+    Assert.assertFalse(inodes[components.length - 1].getModificationTime() ==
+        newInodes[components.length - 1].getModificationTime());
+
+    // Check the INodes for snapshot of file1
+    String snapshotPath = sub1.toString() + "/.snapshot/s1/file1";
+    names = INode.getPathNames(snapshotPath);
+    components = INode.getPathComponents(names);
+    INodesInPath ssNodesInPath = fsdir.rootDir.getExistingPathINodes(
+        components, components.length, false);
+    INode[] ssInodes = ssNodesInPath.getINodes();
+    // Length of ssInodes should be (components.length - 1), since we will
+    // ignore ".snapshot"
+    assertEquals(ssInodes.length, components.length - 1);
+    assertTrue(ssNodesInPath.isSnapshot());
+    // Check the INode for snapshot of file1
+    INode snapshotFileNode = ssInodes[ssInodes.length - 1];
+    assertEquals(snapshotFileNode.getLocalName(), file1.getName());
+    assertTrue(snapshotFileNode instanceof INodeFileSnapshot);
+    // The modification time of the snapshot INode should be the same with the
+    // original INode before modification
+    assertEquals(inodes[inodes.length - 1].getModificationTime(),
+        ssInodes[ssInodes.length - 1].getModificationTime());
+  }
+}
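
The assertions above all hinge on how a path string breaks into components. A standalone sketch (plain String.split, standing in for INode.getPathNames/getPathComponents, whose exact behavior is assumed here) for the snapshot path used throughout the tests:

import java.util.Arrays;

public class PathComponentsSketch {
  public static void main(String[] args) {
    String path = "/TestSnapshot/sub1/.snapshot/s1/file1";
    // The leading "" is the root component.
    String[] names = path.split("/");
    System.out.println(names.length + " components: " + Arrays.toString(names));
    // 6 components: [, TestSnapshot, sub1, .snapshot, s1, file1]
    // The tests expect inodes.length == names.length - 1 == 5 (".snapshot" is
    // skipped) and snapshotRootIndex == 3, the slot where "s1" lands.
  }
}
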
TestSnapshot.java (deleted, org.apache.hadoop.hdfs.server.namenode.snapshot):
@@ -1,87 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode.snapshot;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.INode;
-import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-/** Test snapshot related operations. */
-public class TestSnapshot {
-  private static final long seed = 0;
-  private static final short REPLICATION = 3;
-
-  private final Path dir = new Path("/TestSnapshot");
-
-  private final Path sub1 = new Path(dir, "sub1");
-  private final Path file1 = new Path(sub1, "file1");
-  private final Path file2 = new Path(sub1, "file2");
-
-  private Configuration conf;
-  private MiniDFSCluster cluster;
-  private FSNamesystem fsn;
-  private FSDirectory fsdir;
-
-  private DistributedFileSystem hdfs;
-
-  @Before
-  public void setUp() throws Exception {
-    conf = new Configuration();
-    cluster = new MiniDFSCluster.Builder(conf)
-      .numDataNodes(REPLICATION)
-      .build();
-    cluster.waitActive();
-
-    fsn = cluster.getNamesystem();
-    fsdir = fsn.getFSDirectory();
-
-    hdfs = cluster.getFileSystem();
-    DFSTestUtil.createFile(hdfs, file1, 1024, REPLICATION, seed);
-    DFSTestUtil.createFile(hdfs, file2, 1024, REPLICATION, seed);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  /** Test allow-snapshot operation. */
-  @Test
-  public void testAllowSnapshot() throws Exception {
-    final String path = sub1.toString();
-    final INode before = fsdir.getINode(path);
-    Assert.assertTrue(before instanceof INodeDirectory);
-    Assert.assertFalse(before instanceof INodeDirectorySnapshottable);
-
-    hdfs.allowSnapshot(path);
-    final INode after = fsdir.getINode(path);
-    Assert.assertTrue(after instanceof INodeDirectorySnapshottable);
-  }
-}