HDFS-4749. Use INodeId to identify the corresponding directory node in FSImage saving/loading. Contributed by Jing Zhao
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1475902 13f79535-47bb-0310-9956-ffa450edef68
parent e8b64d3851
commit 745bfa0311
CHANGES.HDFS-2802.txt
@@ -299,3 +299,6 @@ Branch-2802 Snapshot (Unreleased)
   HDFS-4729. Fix OfflineImageViewer and permission checking for snapshot
   operations. (Jing Zhao via szetszwo)
 
+  HDFS-4749. Use INodeId to identify the corresponding directory node in
+  FSImage saving/loading. (Jing Zhao via szetszwo)
+
FSDirectory.java
@@ -2609,7 +2609,6 @@ public class FSDirectory implements Closeable {
     inodeMap = null;
   }
 
-  @VisibleForTesting
   INode getInode(long id) {
     INode inode = new INodeWithAdditionalFields(id, null, new PermissionStatus(
         "", "", new FsPermission((short) 0)), 0, 0) {
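Dropping @VisibleForTesting here reflects a change of role: image loading now resolves each directory through the inode map instead of by path, so getInode has a production caller. The surrounding context shows it probing the map with a throwaway INodeWithAdditionalFields that carries only the id. Below is a minimal sketch of that id-keyed lookup; a plain HashMap stands in for FSDirectory's actual inode map, and the INode type is a simplified stand-in, not the real HDFS class.

```java
import java.util.HashMap;
import java.util.Map;

// Minimal sketch of id-keyed inode lookup. A HashMap stands in for
// FSDirectory's actual inode map; INode is a simplified stand-in type.
class InodeMapSketch {
  static class INode {
    final long id;
    final String name;
    INode(long id, String name) {
      this.id = id;
      this.name = name;
    }
  }

  private final Map<Long, INode> inodeMap = new HashMap<Long, INode>();

  void addToInodeMap(INode inode) {
    inodeMap.put(inode.id, inode);
  }

  // O(1) resolution by id: the operation the fsimage loader now relies on
  // instead of resolving a full path against the directory tree.
  INode getInode(long id) {
    return inodeMap.get(id);
  }

  public static void main(String[] args) {
    InodeMapSketch dir = new InodeMapSketch();
    dir.addToInodeMap(new INode(16385, "/"));
    dir.addToInodeMap(new INode(16386, "foo"));
    System.out.println(dir.getInode(16386).name); // foo
  }
}
```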
FSImageFormat.java
@@ -27,15 +27,13 @@ import java.io.FileInputStream;
 import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
 import java.io.IOException;
-import java.nio.ByteBuffer;
 import java.security.DigestInputStream;
 import java.security.DigestOutputStream;
 import java.security.MessageDigest;
+import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -46,7 +44,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.PermissionStatus;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
@@ -423,9 +420,9 @@ public class FSImageFormat {
     private void loadDirectoryWithSnapshot(DataInput in)
         throws IOException {
       // Step 1. Identify the parent INode
-      String parentPath = FSImageSerialization.readString(in);
-      final INodeDirectory parent = INodeDirectory.valueOf(
-          namesystem.dir.rootDir.getNode(parentPath, false), parentPath);
+      long inodeId = in.readLong();
+      final INodeDirectory parent = this.namesystem.dir.getInode(inodeId)
+          .asDirectory();
 
       // Check if the whole subtree has been saved (for reference nodes)
       boolean toLoadSubtree = referenceMap.toProcessSubtree(parent.getId());
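This is the core of the commit on the load side: a directory record in the fsimage now begins with the directory's 8-byte inode id rather than a serialized path string, and the loader resolves it with one inode-map lookup (namesystem.dir.getInode(inodeId)) instead of walking the tree from rootDir with getNode. A sketch of just the stream handling, using standard java.io types; the lookup itself is reduced to a comment.

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Sketch of the serialization change only: the parent-directory record used
// to start with a path string and now starts with an 8-byte inode id.
public class ParentRecordSketch {
  public static void main(String[] args) throws IOException {
    // Saver side: write the directory's inode id (an illustrative value).
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bos);
    out.writeLong(16386L);
    out.flush();

    // Loader side: one readLong(), then an O(1) inode-map lookup
    // (namesystem.dir.getInode(inodeId) in the real code).
    DataInputStream in = new DataInputStream(
        new ByteArrayInputStream(bos.toByteArray()));
    long inodeId = in.readLong();
    System.out.println("resolve inode by id: " + inodeId);
  }
}
```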
@@ -437,7 +434,7 @@ public class FSImageFormat {
       int numSnapshots = in.readInt();
       if (numSnapshots >= 0) {
         final INodeDirectorySnapshottable snapshottableParent
-            = INodeDirectorySnapshottable.valueOf(parent, parentPath);
+            = INodeDirectorySnapshottable.valueOf(parent, parent.getLocalName());
         if (snapshottableParent.getParent() != null) { // not root
           this.namesystem.getSnapshotManager().addSnapshottable(
               snapshottableParent);
@@ -563,8 +560,10 @@ public class FSImageFormat {
       final byte[] localName = FSImageSerialization.readLocalName(in);
       INode inode = loadINode(localName, isSnapshotINode, in);
       if (LayoutVersion.supports(Feature.ADD_INODE_ID, getLayoutVersion())) {
+        if (!inode.isReference()) { // reference node does not have its id
         namesystem.dir.addToInodeMapUnprotected(inode);
+        }
       }
       return inode;
     }
 
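The guard matters because an INodeReference reports the id of the inode it refers to ("reference node does not have its id", per the inline comment), so registering references in the inode map would re-register an id that belongs to the referred inode. A small illustration of the guard with simplified stand-in types:

```java
import java.util.HashMap;
import java.util.Map;

// Illustration of the !inode.isReference() guard: a reference resolves to the
// id of its referred inode, so only the non-reference inode is registered.
// Node is a simplified stand-in, not the real INode/INodeReference API.
class ReferenceGuardSketch {
  static class Node {
    final long id;
    final boolean isReference;
    Node(long id, boolean isReference) {
      this.id = id;
      this.isReference = isReference;
    }
  }

  public static void main(String[] args) {
    Map<Long, Node> inodeMap = new HashMap<Long, Node>();
    Node referred = new Node(16400, false);
    Node reference = new Node(16400, true); // reports the referred inode's id
    for (Node n : new Node[] { referred, reference }) {
      if (!n.isReference) { // mirror of the loader's new guard
        inodeMap.put(n.id, n);
      }
    }
    System.out.println(inodeMap.size()); // 1: the id is registered once
  }
}
```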
@@ -789,8 +788,6 @@ public class FSImageFormat {
     private MD5Hash savedDigest;
     private final ReferenceMap referenceMap = new ReferenceMap();
 
-    static private final byte[] PATH_SEPARATOR = DFSUtil.string2Bytes(Path.SEPARATOR);
-
     /** @throws IllegalStateException if the instance has not yet saved an image */
     private void checkSaved() {
       if (!saved) {
@@ -852,18 +849,15 @@ public class FSImageFormat {
       LOG.info("Saving image file " + newFile +
                " using " + compression);
 
-      byte[] byteStore = new byte[4*HdfsConstants.MAX_PATH_LENGTH];
-      ByteBuffer strbuf = ByteBuffer.wrap(byteStore);
       // save the root
       FSImageSerialization.saveINode2Image(fsDir.rootDir, out, false,
           referenceMap);
       // save the rest of the nodes
-      saveImage(strbuf, fsDir.rootDir, out, null, true);
+      saveImage(fsDir.rootDir, out, true);
       // save files under construction
       sourceNamesystem.saveFilesUnderConstruction(out);
       context.checkCancelled();
       sourceNamesystem.saveSecretManagerState(out);
-      strbuf = null;
       context.checkCancelled();
       out.flush();
       context.checkCancelled();
@@ -905,40 +899,12 @@ public class FSImageFormat {
       return dirNum;
     }
 
-    /**
-     * The nonSnapshotPath is a path without snapshot in order to enable buffer
-     * reuse. If the snapshot is not null, we need to compute a snapshot path.
-     * E.g., when nonSnapshotPath is "/test/foo/bar/" and the snapshot is s1 of
-     * /test, we actually want to save image for directory /test/foo/bar/ under
-     * snapshot s1 of /test, and the path to save thus should be
-     * "/test/.snapshot/s1/foo/bar/".
-     *
-     * @param nonSnapshotPath The path without snapshot related information.
-     * @param snapshot The snapshot associated with the inode that the path
-     *                 actually leads to.
-     * @return The snapshot path.
-     */
-    private static String computeSnapshotPath(String nonSnapshotPath,
-        Snapshot snapshot) {
-      String snapshotParentFullPath = snapshot.getRoot().getParent()
-          .getFullPathName();
-      String snapshotName = snapshot.getRoot().getLocalName();
-      String relativePath = nonSnapshotPath.equals(snapshotParentFullPath) ?
-          Path.SEPARATOR : nonSnapshotPath.substring(
-              snapshotParentFullPath.length());
-      return Snapshot.getSnapshotPath(snapshotParentFullPath,
-          snapshotName + relativePath);
-    }
-
     /**
      * Save file tree image starting from the given root.
      * This is a recursive procedure, which first saves all children and
      * snapshot diffs of a current directory and then moves inside the
      * sub-directories.
      *
-     * @param currentDirName A ByteBuffer storing the path leading to the
-     *                       current node. For a snapshot node, the path is
-     *                       (the snapshot path - ".snapshot/snapshot_name")
      * @param current The current node
      * @param out The DataoutputStream to write the image
      * @param snapshot The possible snapshot associated with the current node
@@ -946,28 +912,10 @@ public class FSImageFormat {
      *                       reference node, its subtree may already have been
      *                       saved before.
      */
-    private void saveImage(ByteBuffer currentDirName, INodeDirectory current,
-        DataOutputStream out, Snapshot snapshot, boolean toSaveSubtree)
-        throws IOException {
-      // 1. Print prefix (parent directory name)
-      int prefixLen = currentDirName.position();
-      if (snapshot == null) {
-        if (prefixLen == 0) { // root
-          out.writeShort(PATH_SEPARATOR.length);
-          out.write(PATH_SEPARATOR);
-        } else { // non-root directories
-          out.writeShort(prefixLen);
-          out.write(currentDirName.array(), 0, prefixLen);
-        }
-      } else {
-        String nonSnapshotPath = prefixLen == 0 ? Path.SEPARATOR : DFSUtil
-            .bytes2String(currentDirName.array(), 0, prefixLen);
-        String snapshotFullPath = computeSnapshotPath(nonSnapshotPath,
-            snapshot);
-        byte[] snapshotFullPathBytes = DFSUtil.string2Bytes(snapshotFullPath);
-        out.writeShort(snapshotFullPathBytes.length);
-        out.write(snapshotFullPathBytes);
-      }
-
+    private void saveImage(INodeDirectory current, DataOutputStream out,
+        boolean toSaveSubtree) throws IOException {
+      // write the inode id of the directory
+      out.writeLong(current.getId());
 
       if (!toSaveSubtree) {
         return;
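With each record self-identified by its inode id, saveImage no longer threads a ByteBuffer of accumulated path bytes through the recursion, and it drops the Snapshot parameter whose only job was rewriting paths into /.snapshot/ form (which is also why computeSnapshotPath above could be deleted outright). A sketch of the new shape of the recursion, with Dir as a simplified stand-in for INodeDirectory:

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

// Sketch of the rewritten recursion: each record starts with the directory's
// inode id, so no path buffer or snapshot parameter is threaded through.
// Dir is a simplified stand-in for INodeDirectory.
class SaveImageSketch {
  static class Dir {
    final long id;
    final List<Dir> childDirs = new ArrayList<Dir>();
    Dir(long id) { this.id = id; }
  }

  static void saveImage(Dir current, DataOutputStream out) throws IOException {
    out.writeLong(current.id);   // write the inode id of the directory
    // ... children and snapshot diffs of `current` are written here ...
    for (Dir child : current.childDirs) {
      saveImage(child, out);     // recurse; no per-call path state to restore
    }
  }

  public static void main(String[] args) throws IOException {
    Dir root = new Dir(16385);
    root.childDirs.add(new Dir(16386));
    DataOutputStream out = new DataOutputStream(new ByteArrayOutputStream());
    saveImage(root, out);
    System.out.println(out.size() + " bytes written"); // 16: two 8-byte ids
  }
}
```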
@@ -975,11 +923,12 @@ public class FSImageFormat {
 
       final ReadOnlyList<INode> children = current.getChildrenList(null);
       int dirNum = 0;
-      Map<Snapshot, List<INodeDirectory>> snapshotDirMap = null;
+      List<INodeDirectory> snapshotDirs = null;
       if (current instanceof INodeDirectoryWithSnapshot) {
-        snapshotDirMap = new HashMap<Snapshot, List<INodeDirectory>>();
-        dirNum += ((INodeDirectoryWithSnapshot) current).
-            getSnapshotDirectory(snapshotDirMap);
+        snapshotDirs = new ArrayList<INodeDirectory>();
+        ((INodeDirectoryWithSnapshot) current).getSnapshotDirectory(
+            snapshotDirs);
+        dirNum += snapshotDirs.size();
       }
 
       // 2. Write INodeDirectorySnapshottable#snapshotsByNames to record all
@@ -1008,20 +957,14 @@ public class FSImageFormat {
         // make sure we only save the subtree under a reference node once
         boolean toSave = child.isReference() ?
             referenceMap.toProcessSubtree(child.getId()) : true;
-        currentDirName.put(PATH_SEPARATOR).put(child.getLocalNameBytes());
-        saveImage(currentDirName, child.asDirectory(), out, snapshot, toSave);
-        currentDirName.position(prefixLen);
+        saveImage(child.asDirectory(), out, toSave);
       }
-      if (snapshotDirMap != null) {
-        for (Entry<Snapshot, List<INodeDirectory>> e : snapshotDirMap.entrySet()) {
-          for (INodeDirectory subDir : e.getValue()) {
+      if (snapshotDirs != null) {
+        for (INodeDirectory subDir : snapshotDirs) {
           // make sure we only save the subtree under a reference node once
           boolean toSave = subDir.getParentReference() != null ?
               referenceMap.toProcessSubtree(subDir.getId()) : true;
-          currentDirName.put(PATH_SEPARATOR).put(subDir.getLocalNameBytes());
-          saveImage(currentDirName, subDir, out, e.getKey(), toSave);
-          currentDirName.position(prefixLen);
-          }
+          saveImage(subDir, out, toSave);
         }
       }
     }
INodeDirectoryWithSnapshot.java
@@ -25,19 +25,18 @@ import java.util.Collections;
 import java.util.Deque;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Map;
 
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
 import org.apache.hadoop.hdfs.server.namenode.Content;
 import org.apache.hadoop.hdfs.server.namenode.Content.CountsMap.Key;
-import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference;
+import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
 import org.apache.hadoop.hdfs.server.namenode.Quota;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
 import org.apache.hadoop.hdfs.util.Diff;
@@ -159,15 +158,13 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
       writeDeleted(out, referenceMap);
     }
 
-    /** @return The list of INodeDirectory contained in the deleted list */
-    private List<INodeDirectory> getDirsInDeleted() {
-      List<INodeDirectory> dirList = new ArrayList<INodeDirectory>();
+    /** Get the list of INodeDirectory contained in the deleted list */
+    private void getDirsInDeleted(List<INodeDirectory> dirList) {
       for (INode node : getList(ListType.DELETED)) {
         if (node.isDirectory()) {
           dirList.add(node.asDirectory());
         }
       }
-      return dirList;
     }
 
     /**
@@ -794,22 +791,12 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
    * Get all the directories that are stored in some snapshot but not in the
    * current children list. These directories are equivalent to the directories
    * stored in the deletes lists.
-   *
-   * @param snapshotDirMap A snapshot-to-directory-list map for returning.
-   * @return The number of directories returned.
    */
-  public int getSnapshotDirectory(
-      Map<Snapshot, List<INodeDirectory>> snapshotDirMap) {
-    int dirNum = 0;
+  public void getSnapshotDirectory(List<INodeDirectory> snapshotDir) {
     for (DirectoryDiff sdiff : diffs) {
-      List<INodeDirectory> list = sdiff.getChildrenDiff().getDirsInDeleted();
-      if (list.size() > 0) {
-        snapshotDirMap.put(sdiff.snapshot, list);
-        dirNum += list.size();
-      }
+      sdiff.getChildrenDiff().getDirsInDeleted(snapshotDir);
     }
-    return dirNum;
   }
 
   @Override
   public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
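Because paths are no longer reconstructed at save time, the snapshot key in the old Map<Snapshot, List<INodeDirectory>> has no remaining consumer; a flat, caller-owned list is enough, and getDirsInDeleted now appends into it rather than allocating a list per diff. A sketch of that accumulator pattern, with Diff as a simplified stand-in for ChildrenDiff:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Sketch of the accumulator refactor: each diff appends its deleted
// directories into one caller-owned list instead of returning a fresh list
// that the caller then grouped into a Map keyed by snapshot.
class SnapshotDirCollectSketch {
  static class Diff {
    final List<String> deletedDirs;
    Diff(String... dirs) { this.deletedDirs = Arrays.asList(dirs); }
    void getDirsInDeleted(List<String> acc) {
      acc.addAll(deletedDirs);   // append; no temporary list per diff
    }
  }

  public static void main(String[] args) {
    List<Diff> diffs = Arrays.asList(new Diff("a", "b"), new Diff("c"));
    List<String> snapshotDirs = new ArrayList<String>();
    for (Diff d : diffs) {
      d.getDirsInDeleted(snapshotDirs);      // mirrors getSnapshotDirectory
    }
    System.out.println(snapshotDirs.size()); // 3, the old dirNum return value
  }
}
```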
TestRenameWithSnapshots.java
@@ -905,7 +905,7 @@ public class TestRenameWithSnapshots {
     hdfs.rename(foo_dir3, foo_dir2);
     hdfs.rename(bar_dir3, bar_dir2);
 
-    // // modification on /dir2/foo
+    // modification on /dir2/foo
     hdfs.setReplication(bar1_dir2, REPL);
     hdfs.setReplication(bar_dir2, REPL);
 
@@ -1173,11 +1173,15 @@ public class TestRenameWithSnapshots {
     // delete snapshot s2.
     hdfs.deleteSnapshot(sdir2, "s2");
     assertFalse(hdfs.exists(bar_s2));
 
-    // restart the cluster and check fsimage
     restartClusterAndCheckImage();
+    // make sure the whole referred subtree has been destroyed
+    assertEquals(4, fsdir.getRoot().getNamespace());
+    assertEquals(0, fsdir.getRoot().getDiskspace());
 
     hdfs.deleteSnapshot(sdir1, "s1");
     restartClusterAndCheckImage();
+    assertEquals(3, fsdir.getRoot().getNamespace());
+    assertEquals(0, fsdir.getRoot().getDiskspace());
   }
 
   /**
@@ -1456,4 +1460,26 @@ public class TestRenameWithSnapshots {
     assertEquals(2, foo3_wc.getReferenceCount());
     assertSame(foo3Node, foo3_wc.getParentReference());
   }
+
+  @Test
+  public void testRename2PreDescendant() throws Exception {
+    final Path sdir1 = new Path("/dir1");
+    final Path sdir2 = new Path("/dir2");
+    final Path foo = new Path(sdir1, "foo");
+    final Path bar = new Path(foo, "bar");
+    hdfs.mkdirs(bar);
+    hdfs.mkdirs(sdir2);
+
+    SnapshotTestHelper.createSnapshot(hdfs, sdir1, snap1);
+
+    // /dir1/foo/bar -> /dir2/bar
+    final Path bar2 = new Path(sdir2, "bar");
+    hdfs.rename(bar, bar2);
+
+    // /dir1/foo -> /dir2/bar/foo
+    final Path foo2 = new Path(bar2, "foo");
+    hdfs.rename(foo, foo2);
+
+    restartClusterAndCheckImage();
+  }
 }