HDFS-4523. Fix INodeFile replacement, TestQuota and javac errors from trunk merge.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1450477 13f79535-47bb-0310-9956-ffa450edef68
parent aa82b03823
commit e2a618e1cc
@@ -176,3 +176,6 @@ Branch-2802 Snapshot (Unreleased)
 
   HDFS-4514. Add CLI for supporting snapshot rename, diff report, and
   snapshottable directory listing. (Jing Zhao via szetszwo)
+
+  HDFS-4523. Fix INodeFile replacement, TestQuota and javac errors from trunk
+  merge. (szetszwo)
@@ -335,7 +335,7 @@ public class FSDirectory implements Closeable {
           INodeFileUnderConstruction.valueOf(inodesInPath.getLastINode(), path);
 
       // check quota limits and updated space consumed
-      updateCount(inodesInPath, 0, 0, fileINode.getBlockDiskspace(), true);
+      updateCount(inodesInPath, 0, fileINode.getBlockDiskspace(), true);
 
       // associate new last block for the file
       BlockInfoUnderConstruction blockInfo =
@@ -426,7 +426,7 @@ public class FSDirectory implements Closeable {
 
       // update space consumed
       final INodesInPath iip = rootDir.getINodesInPath4Write(path, true);
-      updateCount(iip, 0, 0, -fileNode.getBlockDiskspace(), true);
+      updateCount(iip, 0, -fileNode.getBlockDiskspace(), true);
     }
 
     /**
@@ -969,15 +969,11 @@ public class FSDirectory implements Closeable {
     INodeDirectory trgParent = (INodeDirectory)trgINodes[trgINodes.length-2];
     final Snapshot trgLatestSnapshot = trgINodesInPath.getLatestSnapshot();
 
-    INodeFile [] allSrcInodes = new INodeFile[srcs.length];
-    int i = 0;
-    int totalBlocks = 0;
-    for(String src : srcs) {
-      INodeFile srcInode = (INodeFile)getINode(src);
-      allSrcInodes[i++] = srcInode;
-      totalBlocks += srcInode.numBlocks();
+    final INodeFile [] allSrcInodes = new INodeFile[srcs.length];
+    for(int i = 0; i < srcs.length; i++) {
+      allSrcInodes[i] = (INodeFile)getINode4Write(srcs[i]);
     }
-    trgInode.appendBlocks(allSrcInodes, totalBlocks); // copy the blocks
+    trgInode.concatBlocks(allSrcInodes);
 
     // since we are in the same dir - we can use same parent to remove files
     int count = 0;
@@ -1908,17 +1904,16 @@ public class FSDirectory implements Closeable {
   private INode removeLastINode(final INodesInPath inodesInPath) {
     final INode[] inodes = inodesInPath.getINodes();
     final int pos = inodes.length - 1;
-    INode removedNode = ((INodeDirectory)inodes[pos-1]).removeChild(
+    final boolean removed = ((INodeDirectory)inodes[pos-1]).removeChild(
        inodes[pos], inodesInPath.getLatestSnapshot());
-    if (removedNode != null) {
-      Preconditions.checkState(removedNode == inodes[pos]);
-      inodesInPath.setINode(pos - 1, removedNode.getParent());
-      final Quota.Counts counts = removedNode.computeQuotaUsage();
+    if (removed) {
+      inodesInPath.setINode(pos - 1, inodes[pos].getParent());
+      final Quota.Counts counts = inodes[pos].computeQuotaUsage();
       updateCountNoQuotaCheck(inodesInPath, pos,
           -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
+      return inodes[pos];
    }
-    return removedNode;
+    return null;
  }
 
  /**
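Note on the removeLastINode hunk above: INodeDirectory.removeChild now reports success with a boolean instead of returning the removed inode, and the caller recovers the removed node from the path array it already holds. A minimal, self-contained sketch of that contract, using hypothetical Dir/Node stand-ins rather than the HDFS classes:

```java
import java.util.ArrayList;
import java.util.List;

/** Illustrative stand-ins for the boolean removeChild contract. */
class Node {
  final String name;
  Node(String name) { this.name = name; }
}

class Dir extends Node {
  private final List<Node> children = new ArrayList<>();
  Dir(String name) { super(name); }
  void addChild(Node c) { children.add(c); }

  /** Returns true if the child was removed, false if it was not found. */
  boolean removeChild(Node child) {
    return children.remove(child);
  }
}

public class RemoveLastDemo {
  /** Mirrors removeLastINode: on success, the removed node is nodes[pos]. */
  static Node removeLast(Node[] nodes) {
    final int pos = nodes.length - 1;
    final boolean removed = ((Dir) nodes[pos - 1]).removeChild(nodes[pos]);
    // The caller already holds the reference, so removeChild only needs to
    // report success or failure.
    return removed ? nodes[pos] : null;
  }

  public static void main(String[] args) {
    Dir root = new Dir("root");
    Node f = new Node("file");
    root.addChild(f);
    System.out.println(removeLast(new Node[]{root, f}) == f); // true
  }
}
```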
@@ -1556,7 +1556,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
      }
 
      // check replication and blocks size
-      if(repl != srcInode.getFileReplication()) {
+      if(repl != srcInode.getBlockReplication()) {
        throw new HadoopIllegalArgumentException("concat: the soruce file "
            + src + " and the target file " + target
            + " should have the same replication: source replication is "
@@ -5722,6 +5722,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 
  /** Allow snapshot on a directroy. */
  public void allowSnapshot(String path) throws SafeModeException, IOException {
+    final FSPermissionChecker pc = getPermissionChecker();
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
@@ -5729,7 +5730,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
        throw new SafeModeException("Cannot allow snapshot for " + path,
            safeMode);
      }
-      checkOwner(path);
+      checkOwner(pc, path);
 
      snapshotManager.setSnapshottable(path);
      getEditLog().logAllowSnapshot(path);
@@ -5749,6 +5750,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
  /** Disallow snapshot on a directory. */
  public void disallowSnapshot(String path)
      throws SafeModeException, IOException {
+    final FSPermissionChecker pc = getPermissionChecker();
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
@@ -5756,7 +5758,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
        throw new SafeModeException("Cannot disallow snapshot for " + path,
            safeMode);
      }
-      checkOwner(path);
+      checkOwner(pc, path);
 
      snapshotManager.resetSnapshottable(path);
      getEditLog().logDisallowSnapshot(path);
@@ -5780,6 +5782,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   */
  public void createSnapshot(String snapshotRoot, String snapshotName)
      throws SafeModeException, IOException {
+    final FSPermissionChecker pc = getPermissionChecker();
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
@@ -5787,7 +5790,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
        throw new SafeModeException("Cannot create snapshot for "
            + snapshotRoot, safeMode);
      }
-      checkOwner(snapshotRoot);
+      checkOwner(pc, snapshotRoot);
 
      dir.writeLock();
      try {
@@ -5819,6 +5822,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   */
  public void renameSnapshot(String path, String snapshotOldName,
      String snapshotNewName) throws SafeModeException, IOException {
+    final FSPermissionChecker pc = getPermissionChecker();
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
@@ -5826,7 +5830,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
        throw new SafeModeException("Cannot rename snapshot for " + path,
            safeMode);
      }
-      checkOwner(path);
+      checkOwner(pc, path);
      // TODO: check if the new name is valid. May also need this for
      // creationSnapshot
 
@@ -5863,8 +5867,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
      checkOperation(OperationCategory.READ);
      FSPermissionChecker checker = new FSPermissionChecker(
          fsOwner.getShortUserName(), supergroup);
-      status = snapshotManager
-          .getSnapshottableDirListing(checker.isSuper ? null : checker.user);
+      final String user = checker.isSuperUser()? null : checker.getUser();
+      status = snapshotManager.getSnapshottableDirListing(user);
    } finally {
      readUnlock();
    }
@@ -5919,6 +5923,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   */
  public void deleteSnapshot(String snapshotRoot, String snapshotName)
      throws SafeModeException, IOException {
+    final FSPermissionChecker pc = getPermissionChecker();
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
@@ -5926,7 +5931,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
        throw new SafeModeException(
            "Cannot delete snapshot for " + snapshotRoot, safeMode);
      }
-      checkOwner(snapshotRoot);
+      checkOwner(pc, snapshotRoot);
 
      BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
      dir.writeLock();
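The snapshot operations above all follow one pattern: resolve the FSPermissionChecker once via getPermissionChecker() before taking the namesystem write lock, then pass it to checkOwner(pc, ...) inside the locked region. A minimal sketch of that pattern; the lock, checker, and method bodies below are simplified stand-ins, not the FSNamesystem internals:

```java
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class CheckerPatternDemo {
  static class PermissionChecker {
    final String user;
    PermissionChecker(String user) { this.user = user; }
  }

  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  /** Stand-in for getPermissionChecker(): captures the caller identity once. */
  PermissionChecker getPermissionChecker() {
    return new PermissionChecker(System.getProperty("user.name"));
  }

  void checkOwner(PermissionChecker pc, String path) {
    // ownership test would go here; pc is passed in rather than re-resolved
  }

  void allowSnapshot(String path) {
    final PermissionChecker pc = getPermissionChecker(); // before the lock
    lock.writeLock().lock();
    try {
      checkOwner(pc, path); // reuse the pre-resolved checker under the lock
      // ... mutate snapshot state ...
    } finally {
      lock.writeLock().unlock();
    }
  }
}
```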
@@ -123,18 +123,41 @@ public class INodeDirectory extends INode {
   *
   * @param child the child inode to be removed
   * @param latest See {@link INode#recordModification(Snapshot)}.
-   * @return the removed child inode.
   */
-  public INode removeChild(INode child, Snapshot latest) {
-    assertChildrenNonNull();
-
+  public boolean removeChild(INode child, Snapshot latest) {
    if (isInLatestSnapshot(latest)) {
      return replaceSelf4INodeDirectoryWithSnapshot()
          .removeChild(child, latest);
    }
 
+    return removeChild(child);
+  }
+
+  /**
+   * Remove the specified child from this directory.
+   * The basic remove method which actually calls children.remove(..).
+   *
+   * @param child the child inode to be removed
+   *
+   * @return true if the child is removed; false if the child is not found.
+   */
+  protected final boolean removeChild(final INode child) {
+    assertChildrenNonNull();
    final int i = searchChildren(child.getLocalNameBytes());
-    return i >= 0? children.remove(i): null;
+    if (i < 0) {
+      return false;
+    }
+
+    final INode removed = children.remove(i);
+    Preconditions.checkState(removed == child);
+    return true;
+  }
+
+  /**
+   * Remove the specified child and all its snapshot copies from this directory.
+   */
+  public boolean removeChildAndAllSnapshotCopies(INode child) {
+    return removeChild(child);
  }
 
  /**
@@ -197,13 +220,18 @@ public class INodeDirectory extends INode {
    oldChild.clearReferences();
  }
 
+  private void replaceChildFile(final INodeFile oldChild, final INodeFile newChild) {
+    replaceChild(oldChild, newChild);
+    newChild.updateBlockCollection();
+  }
+
  /** Replace a child {@link INodeFile} with an {@link INodeFileWithSnapshot}. */
  INodeFileWithSnapshot replaceChild4INodeFileWithSnapshot(
      final INodeFile child) {
    Preconditions.checkArgument(!(child instanceof INodeFileWithSnapshot),
        "Child file is already an INodeFileWithSnapshot, child=" + child);
    final INodeFileWithSnapshot newChild = new INodeFileWithSnapshot(child);
-    replaceChild(child, newChild);
+    replaceChildFile(child, newChild);
    return newChild;
  }
 
@@ -214,7 +242,7 @@ public class INodeDirectory extends INode {
        "Child file is already an INodeFileUnderConstructionWithSnapshot, child=" + child);
    final INodeFileUnderConstructionWithSnapshot newChild
        = new INodeFileUnderConstructionWithSnapshot(child, null);
-    replaceChild(child, newChild);
+    replaceChildFile(child, newChild);
    return newChild;
  }
 
@@ -840,7 +868,11 @@ public class INodeDirectory extends INode {
  public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
      final Snapshot snapshot) {
    super.dumpTreeRecursively(out, prefix, snapshot);
-    out.println(", childrenSize=" + getChildrenList(snapshot).size());
+    out.print(", childrenSize=" + getChildrenList(snapshot).size());
+    if (this instanceof INodeDirectoryWithQuota) {
+      // out.print(((INodeDirectoryWithQuota)this).quotaString());
+    }
+    out.println();
 
    if (prefix.length() >= 2) {
      prefix.setLength(prefix.length() - 2);
@@ -173,4 +173,14 @@ public class INodeDirectoryWithQuota extends INodeDirectory {
      throw new DSQuotaExceededException(dsQuota, diskspace + dsDelta);
    }
  }
+
+  String namespaceString() {
+    return "namespace: " + (nsQuota < 0? "-": namespace + "/" + nsQuota);
+  }
+  String diskspaceString() {
+    return "diskspace: " + (dsQuota < 0? "-": diskspace + "/" + dsQuota);
+  }
+  String quotaString() {
+    return ", Quota[" + namespaceString() + ", " + diskspaceString() + "]";
+  }
 }
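The helpers just added format each quota as used/limit and print "-" when the quota is unset (a negative limit). The same rule in isolation, with hypothetical names:

```java
/** Sketch of the quota-string rule above: a negative limit means unset. */
public class QuotaStringDemo {
  static String quotaString(String kind, long used, long limit) {
    return kind + ": " + (limit < 0 ? "-" : used + "/" + limit);
  }

  public static void main(String[] args) {
    System.out.println(quotaString("namespace", 5, 100)); // namespace: 5/100
    System.out.println(quotaString("diskspace", 42, -1)); // diskspace: -
  }
}
```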
@@ -196,11 +196,23 @@ public class INodeFile extends INode implements BlockCollection {
    return this.blocks;
  }
 
+  void updateBlockCollection() {
+    if (blocks != null) {
+      for(BlockInfo b : blocks) {
+        b.setBlockCollection(this);
+      }
+    }
+  }
+
  /**
   * append array of blocks to this.blocks
   */
-  void appendBlocks(INodeFile [] inodes, int totalAddedBlocks) {
+  void concatBlocks(INodeFile[] inodes) {
    int size = this.blocks.length;
+    int totalAddedBlocks = 0;
+    for(INodeFile f : inodes) {
+      totalAddedBlocks += f.blocks.length;
+    }
 
    BlockInfo[] newlist = new BlockInfo[size + totalAddedBlocks];
    System.arraycopy(this.blocks, 0, newlist, 0, size);
@@ -210,10 +222,8 @@ public class INodeFile extends INode implements BlockCollection {
      size += in.blocks.length;
    }
 
-    for(BlockInfo bi: newlist) {
-      bi.setBlockCollection(this);
-    }
    setBlocks(newlist);
+    updateBlockCollection();
  }
 
  /**
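concatBlocks (replacing appendBlocks) computes the total added block count from the sources itself and then rebinds ownership of every block through the new updateBlockCollection() hook. A simplified sketch of that shape, with Blk/SimpleFile as assumed stand-ins for BlockInfo/INodeFile:

```java
import java.util.Arrays;

public class ConcatDemo {
  static class Blk {
    SimpleFile owner; // plays the role of BlockInfo.setBlockCollection(..)
  }

  static class SimpleFile {
    Blk[] blocks = new Blk[0];

    void concatBlocks(SimpleFile[] srcs) {
      int total = blocks.length;
      for (SimpleFile f : srcs) {
        total += f.blocks.length; // sizes derived internally now
      }
      Blk[] newList = Arrays.copyOf(blocks, total);
      int pos = blocks.length;
      for (SimpleFile f : srcs) {
        System.arraycopy(f.blocks, 0, newList, pos, f.blocks.length);
        pos += f.blocks.length;
      }
      blocks = newList;
      updateBlockCollection(); // single place that fixes block ownership
    }

    void updateBlockCollection() {
      for (Blk b : blocks) {
        b.owner = this;
      }
    }
  }

  public static void main(String[] args) {
    SimpleFile target = new SimpleFile();
    SimpleFile src = new SimpleFile();
    src.blocks = new Blk[]{ new Blk(), new Blk() };
    target.concatBlocks(new SimpleFile[]{ src });
    System.out.println(target.blocks.length);             // 2
    System.out.println(target.blocks[0].owner == target); // true
  }
}
```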
@@ -29,9 +29,9 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.INode.Content.CountsMap.Key;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
-import org.apache.hadoop.hdfs.server.namenode.INode.Content.CountsMap.Key;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff.Container;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff.UndoInfo;
@@ -60,6 +60,16 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
      return getCreatedList().set(c, newChild);
    }
 
+    private final boolean removeCreatedChild(final int c, final INode child) {
+      final List<INode> created = getCreatedList();
+      if (created.get(c) == child) {
+        final INode removed = created.remove(c);
+        Preconditions.checkState(removed == child);
+        return true;
+      }
+      return false;
+    }
+
    /** clear the created list */
    private int destroyCreatedList(
        final INodeDirectoryWithSnapshot currentINode,
@@ -499,16 +509,16 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
  }
 
  @Override
-  public INode removeChild(INode child, Snapshot latest) {
+  public boolean removeChild(INode child, Snapshot latest) {
    ChildrenDiff diff = null;
    UndoInfo<INode> undoInfo = null;
    if (latest != null) {
      diff = diffs.checkAndAddLatestSnapshotDiff(latest, this).diff;
      undoInfo = diff.delete(child);
    }
-    final INode removed = super.removeChild(child, null);
+    final boolean removed = removeChild(child);
    if (undoInfo != null) {
-      if (removed == null) {
+      if (!removed) {
        //remove failed, undo
        diff.undoDelete(child, undoInfo);
      }
@@ -516,11 +526,32 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
    return removed;
  }
 
+  @Override
+  public boolean removeChildAndAllSnapshotCopies(INode child) {
+    if (!removeChild(child)) {
+      return false;
+    }
+
+    // remove same child from the created list, if there is any.
+    final byte[] name = child.getLocalNameBytes();
+    final List<DirectoryDiff> diffList = diffs.asList();
+    for(int i = diffList.size() - 1; i >= 0; i--) {
+      final ChildrenDiff diff = diffList.get(i).diff;
+      final int c = diff.searchCreatedIndex(name);
+      if (c >= 0) {
+        if (diff.removeCreatedChild(c, child)) {
+          return true;
+        }
+      }
+    }
+    return true;
+  }
+
  @Override
  public void replaceChild(final INode oldChild, final INode newChild) {
    super.replaceChild(oldChild, newChild);
 
-    // replace the created child, if there is any.
+    // replace same child in the created list, if there is any.
    final byte[] name = oldChild.getLocalNameBytes();
    final List<DirectoryDiff> diffList = diffs.asList();
    for(int i = diffList.size() - 1; i >= 0; i--) {
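removeChildAndAllSnapshotCopies, added above, first removes the live child and then walks the directory diffs from the newest snapshot backwards, dropping the child from the first created-list that still records it. A standalone sketch of that scan, over assumed simplified diff records (not the HDFS types):

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class CreatedListDemo {
  static class Diff {
    final List<String> createdList = new ArrayList<>();

    int searchCreatedIndex(String name) {
      return createdList.indexOf(name); // -1 when not recorded
    }

    boolean removeCreatedChild(int c, String name) {
      if (createdList.get(c).equals(name)) {
        createdList.remove(c);
        return true;
      }
      return false;
    }
  }

  static boolean removeChildAndAllSnapshotCopies(
      String child, List<String> children, List<Diff> diffs) {
    if (!children.remove(child)) {
      return false; // not present in the current tree
    }
    // newest snapshot first: the child appears in at most one created-list
    for (int i = diffs.size() - 1; i >= 0; i--) {
      final int c = diffs.get(i).searchCreatedIndex(child);
      if (c >= 0 && diffs.get(i).removeCreatedChild(c, child)) {
        return true;
      }
    }
    return true; // removed; no snapshot copy existed
  }

  public static void main(String[] args) {
    List<String> children = new ArrayList<>(Arrays.asList("a", "b"));
    Diff d = new Diff();
    d.createdList.add("a");
    List<Diff> diffs = new ArrayList<>(Arrays.asList(d));
    System.out.println(removeChildAndAllSnapshotCopies("a", children, diffs)); // true
    System.out.println(d.createdList.isEmpty());                               // true
  }
}
```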
@@ -17,8 +17,6 @@
 */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import java.util.Iterator;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.Assert;
 import org.junit.Test;
 
 /** A class for testing quota-related commands */
@@ -171,15 +172,13 @@ public class TestQuota {
      fout = dfs.create(childFile1, replication);
 
      // 10.s: but writing fileLen bytes should result in an quota exception
-      hasException = false;
      try {
        fout.write(new byte[fileLen]);
        fout.close();
+        Assert.fail();
      } catch (QuotaExceededException e) {
-        hasException = true;
        IOUtils.closeStream(fout);
      }
-      assertTrue(hasException);
 
      //delete the file
      dfs.delete(childFile1, false);
@@ -205,28 +205,15 @@ public class TestINodeFile {
  }
 
  @Test
-  public void testAppendBlocks() {
+  public void testConcatBlocks() {
    INodeFile origFile = createINodeFiles(1, "origfile")[0];
    assertEquals("Number of blocks didn't match", origFile.numBlocks(), 1L);
 
    INodeFile[] appendFiles = createINodeFiles(4, "appendfile");
-    origFile.appendBlocks(appendFiles, getTotalBlocks(appendFiles));
+    origFile.concatBlocks(appendFiles);
    assertEquals("Number of blocks didn't match", origFile.numBlocks(), 5L);
  }
 
-  /**
-   * Gives the count of blocks for a given number of files
-   * @param files Array of INode files
-   * @return total count of blocks
-   */
-  private int getTotalBlocks(INodeFile[] files) {
-    int nBlocks=0;
-    for(int i=0; i < files.length; i++) {
-      nBlocks += files[i].numBlocks();
-    }
-    return nBlocks;
-  }
-
  /**
   * Creates the required number of files with one block each
   * @param nCount Number of INodes to create
@@ -17,13 +17,14 @@
 */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.util.Arrays;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -31,14 +32,13 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -46,6 +46,8 @@ import org.junit.Test;
 * Test cases for snapshot-related information in blocksMap.
 */
 public class TestSnapshotBlocksMap {
+  // TODO: fix concat for snapshot
+  private static final boolean runConcatTest = false;
 
  private static final long seed = 0;
  private static final short REPLICATION = 3;
@@ -57,6 +59,8 @@ public class TestSnapshotBlocksMap {
  protected Configuration conf;
  protected MiniDFSCluster cluster;
  protected FSNamesystem fsn;
+  FSDirectory fsdir;
+  BlockManager blockmanager;
  protected DistributedFileSystem hdfs;
 
  @Before
@@ -68,6 +72,8 @@ public class TestSnapshotBlocksMap {
    cluster.waitActive();
 
    fsn = cluster.getNamesystem();
+    fsdir = fsn.getFSDirectory();
+    blockmanager = fsn.getBlockManager();
    hdfs = cluster.getFileSystem();
  }
 
@@ -78,6 +84,38 @@ public class TestSnapshotBlocksMap {
    }
  }
 
+  void assertAllNull(INodeFile inode, Path path, String[] snapshots) throws Exception {
+    Assert.assertNull(inode.getBlocks());
+    assertINodeNull(path.toString());
+    assertINodeNullInSnapshots(path, snapshots);
+  }
+
+  void assertINodeNull(String path) throws Exception {
+    Assert.assertNull(fsdir.getINode(path));
+  }
+
+  void assertINodeNullInSnapshots(Path path, String... snapshots) throws Exception {
+    for(String s : snapshots) {
+      assertINodeNull(SnapshotTestHelper.getSnapshotPath(
+          path.getParent(), s, path.getName()).toString());
+    }
+  }
+
+  INodeFile assertBlockCollection(String path, int numBlocks) throws Exception {
+    final INodeFile file = INodeFile.valueOf(fsdir.getINode(path), path);
+    assertEquals(numBlocks, file.getBlocks().length);
+    for(BlockInfo b : file.getBlocks()) {
+      assertBlockCollection(file, b);
+    }
+    return file;
+  }
+
+  void assertBlockCollection(final INodeFile file, final BlockInfo b) {
+    Assert.assertSame(b, blockmanager.getStoredBlock(b));
+    Assert.assertSame(file, blockmanager.getBlockCollection(b));
+    Assert.assertSame(file, b.getBlockCollection());
+  }
+
  /**
   * Test deleting a file with snapshots. Need to check the blocksMap to make
   * sure the corresponding record is updated correctly.
@@ -87,87 +125,107 @@ public class TestSnapshotBlocksMap {
    Path file0 = new Path(sub1, "file0");
    Path file1 = new Path(sub1, "file1");
 
-    Path subsub1 = new Path(sub1, "sub1");
-    Path subfile0 = new Path(subsub1, "file0");
+    Path sub2 = new Path(sub1, "sub2");
+    Path file2 = new Path(sub2, "file2");
+
+    Path file3 = new Path(sub1, "file3");
+    Path file4 = new Path(sub1, "file4");
+    Path file5 = new Path(sub1, "file5");
 
    // Create file under sub1
-    DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
-    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
-    DFSTestUtil.createFile(hdfs, subfile0, BLOCKSIZE, REPLICATION, seed);
+    DFSTestUtil.createFile(hdfs, file0, 4*BLOCKSIZE, REPLICATION, seed);
+    DFSTestUtil.createFile(hdfs, file1, 2*BLOCKSIZE, REPLICATION, seed);
+    DFSTestUtil.createFile(hdfs, file2, 3*BLOCKSIZE, REPLICATION, seed);
 
-    BlockManager bm = fsn.getBlockManager();
-    FSDirectory dir = fsn.getFSDirectory();
-
-    INodeFile inodeForDeletedFile = INodeFile.valueOf(
-        dir.getINode(subfile0.toString()), subfile0.toString());
-    BlockInfo[] blocksForDeletedFile = inodeForDeletedFile.getBlocks();
-    assertEquals(blocksForDeletedFile.length, 1);
-    BlockCollection bcForDeletedFile = bm
-        .getBlockCollection(blocksForDeletedFile[0]);
-    assertNotNull(bcForDeletedFile);
    // Normal deletion
-    hdfs.delete(subsub1, true);
-    bcForDeletedFile = bm.getBlockCollection(blocksForDeletedFile[0]);
-    // The INode should have been removed from the blocksMap
-    assertNull(bcForDeletedFile);
+    {
+      final INodeFile f2 = assertBlockCollection(file2.toString(), 3);
+      BlockInfo[] blocks = f2.getBlocks();
+      hdfs.delete(sub2, true);
+      // The INode should have been removed from the blocksMap
+      for(BlockInfo b : blocks) {
+        assertNull(blockmanager.getBlockCollection(b));
+      }
+    }
 
    // Create snapshots for sub1
-    for (int i = 0; i < 2; i++) {
-      SnapshotTestHelper.createSnapshot(hdfs, sub1, "s" + i);
+    final String[] snapshots = {"s0", "s1", "s2"};
+    DFSTestUtil.createFile(hdfs, file3, 5*BLOCKSIZE, REPLICATION, seed);
+    SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[0]);
+    DFSTestUtil.createFile(hdfs, file4, 1*BLOCKSIZE, REPLICATION, seed);
+    SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[1]);
+    DFSTestUtil.createFile(hdfs, file5, 7*BLOCKSIZE, REPLICATION, seed);
+    SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[2]);
+
+    // set replication so that the inode should be replaced for snapshots
+    {
+      INodeFile f1 = assertBlockCollection(file1.toString(), 2);
+      Assert.assertSame(INodeFile.class, f1.getClass());
+      hdfs.setReplication(file1, (short)2);
+      f1 = assertBlockCollection(file1.toString(), 2);
+      Assert.assertSame(INodeFileWithSnapshot.class, f1.getClass());
    }
 
    // Check the block information for file0
-    // Get the INode for file0
-    INodeFile inode = INodeFile.valueOf(dir.getINode(file0.toString()),
-        file0.toString());
-    BlockInfo[] blocks = inode.getBlocks();
-    // Get the INode for the first block from blocksMap
-    BlockCollection bc = bm.getBlockCollection(blocks[0]);
-    // The two INode should be the same one
-    assertTrue(bc == inode);
+    final INodeFile f0 = assertBlockCollection(file0.toString(), 4);
+    BlockInfo[] blocks0 = f0.getBlocks();
 
    // Also check the block information for snapshot of file0
    Path snapshotFile0 = SnapshotTestHelper.getSnapshotPath(sub1, "s0",
        file0.getName());
-    INodeFile ssINode0 = INodeFile.valueOf(dir.getINode(snapshotFile0.toString()),
-        snapshotFile0.toString());
-    BlockInfo[] ssBlocks = ssINode0.getBlocks();
-    // The snapshot of file1 should contain 1 block
-    assertEquals(1, ssBlocks.length);
+    assertBlockCollection(snapshotFile0.toString(), 4);
 
    // Delete file0
    hdfs.delete(file0, true);
-    // Make sure the first block of file0 is still in blocksMap
-    BlockInfo blockInfoAfterDeletion = bm.getStoredBlock(blocks[0]);
-    assertNotNull(blockInfoAfterDeletion);
-    // Check the INode information
-    BlockCollection bcAfterDeletion = blockInfoAfterDeletion
-        .getBlockCollection();
+    // Make sure the blocks of file0 is still in blocksMap
+    for(BlockInfo b : blocks0) {
+      assertNotNull(blockmanager.getBlockCollection(b));
+    }
+    assertBlockCollection(snapshotFile0.toString(), 4);
 
    // Compare the INode in the blocksMap with INodes for snapshots
-    Path snapshot1File0 = SnapshotTestHelper.getSnapshotPath(sub1, "s1",
-        file0.getName());
-    INodeFile ssINode1 = INodeFile.valueOf(
-        dir.getINode(snapshot1File0.toString()), snapshot1File0.toString());
-    assertTrue(bcAfterDeletion == ssINode0 || bcAfterDeletion == ssINode1);
-    assertEquals(1, bcAfterDeletion.getBlocks().length);
+    String s1f0 = SnapshotTestHelper.getSnapshotPath(sub1, "s1",
+        file0.getName()).toString();
+    assertBlockCollection(s1f0, 4);
 
    // Delete snapshot s1
    hdfs.deleteSnapshot(sub1, "s1");
 
    // Make sure the first block of file0 is still in blocksMap
-    BlockInfo blockInfoAfterSnapshotDeletion = bm.getStoredBlock(blocks[0]);
-    assertNotNull(blockInfoAfterSnapshotDeletion);
-    BlockCollection bcAfterSnapshotDeletion = blockInfoAfterSnapshotDeletion
-        .getBlockCollection();
-    assertTrue(bcAfterSnapshotDeletion == ssINode0);
-    assertEquals(1, bcAfterSnapshotDeletion.getBlocks().length);
+    for(BlockInfo b : blocks0) {
+      assertNotNull(blockmanager.getBlockCollection(b));
+    }
+    assertBlockCollection(snapshotFile0.toString(), 4);
+
    try {
-      ssINode1 = INodeFile.valueOf(
-          dir.getINode(snapshot1File0.toString()), snapshot1File0.toString());
+      INodeFile.valueOf(fsdir.getINode(s1f0), s1f0);
      fail("Expect FileNotFoundException when identifying the INode in a deleted Snapshot");
    } catch (IOException e) {
-      GenericTestUtils.assertExceptionContains("File does not exist: "
-          + snapshot1File0.toString(), e);
+      assertExceptionContains("File does not exist: " + s1f0, e);
    }
+
+    // concat file1, file3 and file5 to file4
+    if (runConcatTest) {
+      final INodeFile f1 = assertBlockCollection(file1.toString(), 2);
+      final BlockInfo[] f1blocks = f1.getBlocks();
+      final INodeFile f3 = assertBlockCollection(file3.toString(), 5);
+      final BlockInfo[] f3blocks = f3.getBlocks();
+      final INodeFile f5 = assertBlockCollection(file5.toString(), 7);
+      final BlockInfo[] f5blocks = f5.getBlocks();
+      assertBlockCollection(file4.toString(), 1);
+
+      hdfs.concat(file4, new Path[]{file1, file3, file5});
+
+      final INodeFile f4 = assertBlockCollection(file4.toString(), 15);
+      final BlockInfo[] blocks4 = f4.getBlocks();
+      for(BlockInfo[] blocks : Arrays.asList(f1blocks, f3blocks, blocks4, f5blocks)) {
+        for(BlockInfo b : blocks) {
+          assertBlockCollection(f4, b);
+        }
+      }
+      assertAllNull(f1, file1, snapshots);
+      assertAllNull(f3, file3, snapshots);
+      assertAllNull(f5, file5, snapshots);
+    }
  }
 }