diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index e21fcdff37f..59ec12a9b8d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -242,6 +242,9 @@ Trunk (Unreleased) HDFS-5651. Remove dfs.namenode.caching.enabled and improve CRM locking. (cmccabe via wang) + HDFS-5715. Use Snapshot ID to indicate the corresponding Snapshot for a + FileDiff/DirectoryDiff. (jing9) + OPTIMIZATIONS HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java index aef726fa9b9..dc74c51dea9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java @@ -49,6 +49,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; import org.apache.hadoop.hdfs.server.namenode.INodeFile; +import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.hdfs.util.ReadOnlyList; import org.apache.hadoop.util.GSet; import org.apache.hadoop.util.Time; @@ -339,7 +340,8 @@ public class CacheReplicationMonitor extends Thread implements Closeable { } } else if (node.isDirectory()) { INodeDirectory dir = node.asDirectory(); - ReadOnlyList children = dir.getChildrenList(null); + ReadOnlyList children = dir + .getChildrenList(Snapshot.CURRENT_STATE_ID); for (INode child : children) { if (child.isFile()) { rescanFile(directive, child.asFile()); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java index b3ff8dfef59..5cb8fe92332 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java @@ -53,7 +53,6 @@ import org.apache.hadoop.fs.InvalidRequestException; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.CacheDirective; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; @@ -69,6 +68,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type; import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; +import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter; @@ -399,7 +399,8 @@ public final class CacheManager { requestedBytes = file.computeFileSize(); } else if (node.isDirectory()) { INodeDirectory dir = node.asDirectory(); - ReadOnlyList children = dir.getChildrenList(null); + ReadOnlyList children = dir + .getChildrenList(Snapshot.CURRENT_STATE_ID); requestedFiles = children.size(); for (INode child : children) { if (child.isFile()) { diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index a4eae1ee148..a9f17fa3bd2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -614,14 +614,14 @@ public class FSDirectory implements Closeable { INode srcChild = srcIIP.getLastINode(); final byte[] srcChildName = srcChild.getLocalNameBytes(); final boolean isSrcInSnapshot = srcChild.isInLatestSnapshot( - srcIIP.getLatestSnapshot()); + srcIIP.getLatestSnapshotId()); final boolean srcChildIsReference = srcChild.isReference(); // Record the snapshot on srcChild. After the rename, before any new // snapshot is taken on the dst tree, changes will be recorded in the latest // snapshot of the src tree. if (isSrcInSnapshot) { - srcChild = srcChild.recordModification(srcIIP.getLatestSnapshot()); + srcChild = srcChild.recordModification(srcIIP.getLatestSnapshotId()); srcIIP.setLastINode(srcChild); } @@ -629,17 +629,16 @@ public class FSDirectory implements Closeable { final INodeReference.WithCount withCount; Quota.Counts oldSrcCounts = Quota.Counts.newInstance(); int srcRefDstSnapshot = srcChildIsReference ? 
srcChild.asReference() - .getDstSnapshotId() : Snapshot.INVALID_ID; + .getDstSnapshotId() : Snapshot.CURRENT_STATE_ID; if (isSrcInSnapshot) { final INodeReference.WithName withName = srcIIP.getINode(-2).asDirectory().replaceChild4ReferenceWithName( - srcChild, srcIIP.getLatestSnapshot()); + srcChild, srcIIP.getLatestSnapshotId()); withCount = (INodeReference.WithCount) withName.getReferredINode(); srcChild = withName; srcIIP.setLastINode(srcChild); // get the counts before rename - withCount.getReferredINode().computeQuotaUsage(oldSrcCounts, true, - Snapshot.INVALID_ID); + withCount.getReferredINode().computeQuotaUsage(oldSrcCounts, true); } else if (srcChildIsReference) { // srcChild is reference but srcChild is not in latest snapshot withCount = (WithCount) srcChild.asReference().getReferredINode(); @@ -675,10 +674,9 @@ public class FSDirectory implements Closeable { toDst = srcChild; } else { withCount.getReferredINode().setLocalName(dstChildName); - Snapshot dstSnapshot = dstIIP.getLatestSnapshot(); + int dstSnapshotId = dstIIP.getLatestSnapshotId(); final INodeReference.DstReference ref = new INodeReference.DstReference( - dstParent.asDirectory(), withCount, - dstSnapshot == null ? 
Snapshot.INVALID_ID : dstSnapshot.getId()); + dstParent.asDirectory(), withCount, dstSnapshotId); toDst = ref; } @@ -690,9 +688,9 @@ public class FSDirectory implements Closeable { } // update modification time of dst and the parent of src final INode srcParent = srcIIP.getINode(-2); - srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshot()); + srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshotId()); dstParent = dstIIP.getINode(-2); // refresh dstParent - dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshot()); + dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshotId()); // update moved leases with new filename getFSNamesystem().unprotectedChangeLease(src, dst); @@ -700,7 +698,7 @@ public class FSDirectory implements Closeable { if (isSrcInSnapshot) { // get the counts after rename Quota.Counts newSrcCounts = srcChild.computeQuotaUsage( - Quota.Counts.newInstance(), false, Snapshot.INVALID_ID); + Quota.Counts.newInstance(), false); newSrcCounts.subtract(oldSrcCounts); srcParent.addSpaceConsumed(newSrcCounts.get(Quota.NAMESPACE), newSrcCounts.get(Quota.DISKSPACE), false); @@ -732,8 +730,7 @@ public class FSDirectory implements Closeable { if (isSrcInSnapshot) { // srcParent must have snapshot feature since isSrcInSnapshot is true // and src node has been removed from srcParent - srcParent.undoRename4ScrParent(oldSrcChild.asReference(), srcChild, - srcIIP.getLatestSnapshot()); + srcParent.undoRename4ScrParent(oldSrcChild.asReference(), srcChild); } else { // original srcChild is not in latest snapshot, we only need to add // the srcChild back @@ -836,7 +833,7 @@ public class FSDirectory implements Closeable { } if (dstInode.isDirectory()) { final ReadOnlyList children = dstInode.asDirectory() - .getChildrenList(null); + .getChildrenList(Snapshot.CURRENT_STATE_ID); if (!children.isEmpty()) { error = "rename destination directory is not empty: " + dst; NameNode.stateChangeLog.warn( @@ -867,31 +864,30 @@ 
public class FSDirectory implements Closeable { INode srcChild = srcIIP.getLastINode(); final byte[] srcChildName = srcChild.getLocalNameBytes(); final boolean isSrcInSnapshot = srcChild.isInLatestSnapshot( - srcIIP.getLatestSnapshot()); + srcIIP.getLatestSnapshotId()); final boolean srcChildIsReference = srcChild.isReference(); // Record the snapshot on srcChild. After the rename, before any new // snapshot is taken on the dst tree, changes will be recorded in the latest // snapshot of the src tree. if (isSrcInSnapshot) { - srcChild = srcChild.recordModification(srcIIP.getLatestSnapshot()); + srcChild = srcChild.recordModification(srcIIP.getLatestSnapshotId()); srcIIP.setLastINode(srcChild); } // check srcChild for reference final INodeReference.WithCount withCount; int srcRefDstSnapshot = srcChildIsReference ? srcChild.asReference() - .getDstSnapshotId() : Snapshot.INVALID_ID; + .getDstSnapshotId() : Snapshot.CURRENT_STATE_ID; Quota.Counts oldSrcCounts = Quota.Counts.newInstance(); if (isSrcInSnapshot) { final INodeReference.WithName withName = srcIIP.getINode(-2).asDirectory() - .replaceChild4ReferenceWithName(srcChild, srcIIP.getLatestSnapshot()); + .replaceChild4ReferenceWithName(srcChild, srcIIP.getLatestSnapshotId()); withCount = (INodeReference.WithCount) withName.getReferredINode(); srcChild = withName; srcIIP.setLastINode(srcChild); // get the counts before rename - withCount.getReferredINode().computeQuotaUsage(oldSrcCounts, true, - Snapshot.INVALID_ID); + withCount.getReferredINode().computeQuotaUsage(oldSrcCounts, true); } else if (srcChildIsReference) { // srcChild is reference but srcChild is not in latest snapshot withCount = (WithCount) srcChild.asReference().getReferredINode(); @@ -935,10 +931,9 @@ public class FSDirectory implements Closeable { toDst = srcChild; } else { withCount.getReferredINode().setLocalName(dstChildName); - Snapshot dstSnapshot = dstIIP.getLatestSnapshot(); + int dstSnapshotId = dstIIP.getLatestSnapshotId(); final 
INodeReference.DstReference ref = new INodeReference.DstReference( - dstIIP.getINode(-2).asDirectory(), withCount, - dstSnapshot == null ? Snapshot.INVALID_ID : dstSnapshot.getId()); + dstIIP.getINode(-2).asDirectory(), withCount, dstSnapshotId); toDst = ref; } @@ -952,9 +947,9 @@ public class FSDirectory implements Closeable { } final INode srcParent = srcIIP.getINode(-2); - srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshot()); + srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshotId()); dstParent = dstIIP.getINode(-2); - dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshot()); + dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshotId()); // update moved lease with new filename getFSNamesystem().unprotectedChangeLease(src, dst); @@ -964,8 +959,8 @@ public class FSDirectory implements Closeable { undoRemoveDst = false; BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo(); List removedINodes = new ChunkedArrayList(); - filesDeleted = removedDst.cleanSubtree(null, - dstIIP.getLatestSnapshot(), collectedBlocks, removedINodes, true) + filesDeleted = removedDst.cleanSubtree(Snapshot.CURRENT_STATE_ID, + dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes, true) .get(Quota.NAMESPACE); getFSNamesystem().removePathAndBlocks(src, collectedBlocks, removedINodes); @@ -981,7 +976,7 @@ public class FSDirectory implements Closeable { if (isSrcInSnapshot) { // get the counts after rename Quota.Counts newSrcCounts = srcChild.computeQuotaUsage( - Quota.Counts.newInstance(), false, Snapshot.INVALID_ID); + Quota.Counts.newInstance(), false); newSrcCounts.subtract(oldSrcCounts); srcParent.addSpaceConsumed(newSrcCounts.get(Quota.NAMESPACE), newSrcCounts.get(Quota.DISKSPACE), false); @@ -1012,8 +1007,7 @@ public class FSDirectory implements Closeable { } if (srcParent.isWithSnapshot()) { - srcParent.undoRename4ScrParent(oldSrcChild.asReference(), srcChild, - srcIIP.getLatestSnapshot()); + 
srcParent.undoRename4ScrParent(oldSrcChild.asReference(), srcChild); } else { // srcParent is not an INodeDirectoryWithSnapshot, we only need to add // the srcChild back @@ -1024,7 +1018,7 @@ public class FSDirectory implements Closeable { // Rename failed - restore dst if (dstParent.isDirectory() && dstParent.asDirectory().isWithSnapshot()) { dstParent.asDirectory().undoRename4DstParent(removedDst, - dstIIP.getLatestSnapshot()); + dstIIP.getLatestSnapshotId()); } else { addLastINodeNoQuotaCheck(dstIIP, removedDst); } @@ -1088,7 +1082,7 @@ public class FSDirectory implements Closeable { updateCount(iip, 0, dsDelta, true); } - file = file.setFileReplication(replication, iip.getLatestSnapshot(), + file = file.setFileReplication(replication, iip.getLatestSnapshotId(), inodeMap); final short newBR = file.getBlockReplication(); @@ -1155,7 +1149,7 @@ public class FSDirectory implements Closeable { if (inode == null) { throw new FileNotFoundException("File does not exist: " + src); } - inode.setPermission(permissions, inodesInPath.getLatestSnapshot()); + inode.setPermission(permissions, inodesInPath.getLatestSnapshotId()); } void setOwner(String src, String username, String groupname) @@ -1180,10 +1174,10 @@ public class FSDirectory implements Closeable { throw new FileNotFoundException("File does not exist: " + src); } if (username != null) { - inode = inode.setUser(username, inodesInPath.getLatestSnapshot()); + inode = inode.setUser(username, inodesInPath.getLatestSnapshotId()); } if (groupname != null) { - inode.setGroup(groupname, inodesInPath.getLatestSnapshot()); + inode.setGroup(groupname, inodesInPath.getLatestSnapshotId()); } } @@ -1225,12 +1219,12 @@ public class FSDirectory implements Closeable { final INode[] trgINodes = trgIIP.getINodes(); final INodeFile trgInode = trgIIP.getLastINode().asFile(); INodeDirectory trgParent = trgINodes[trgINodes.length-2].asDirectory(); - final Snapshot trgLatestSnapshot = trgIIP.getLatestSnapshot(); + final int 
trgLatestSnapshot = trgIIP.getLatestSnapshotId(); final INodeFile [] allSrcInodes = new INodeFile[srcs.length]; for(int i = 0; i < srcs.length; i++) { final INodesInPath iip = getINodesInPath4Write(srcs[i]); - final Snapshot latest = iip.getLatestSnapshot(); + final int latest = iip.getLatestSnapshotId(); final INode inode = iip.getLastINode(); // check if the file in the latest snapshot @@ -1354,7 +1348,7 @@ public class FSDirectory implements Closeable { //not found or not a directory return false; } - final Snapshot s = inodesInPath.getPathSnapshot(); + final int s = inodesInPath.getPathSnapshotId(); return !inode.asDirectory().getChildrenList(s).isEmpty(); } finally { readUnlock(); @@ -1408,7 +1402,7 @@ public class FSDirectory implements Closeable { } // record modification - final Snapshot latestSnapshot = iip.getLatestSnapshot(); + final int latestSnapshot = iip.getLatestSnapshotId(); targetNode = targetNode.recordModification(latestSnapshot); iip.setLastINode(targetNode); @@ -1429,8 +1423,8 @@ public class FSDirectory implements Closeable { if (!targetNode.isInLatestSnapshot(latestSnapshot)) { targetNode.destroyAndCollectBlocks(collectedBlocks, removedINodes); } else { - Quota.Counts counts = targetNode.cleanSubtree(null, latestSnapshot, - collectedBlocks, removedINodes, true); + Quota.Counts counts = targetNode.cleanSubtree(Snapshot.CURRENT_STATE_ID, + latestSnapshot, collectedBlocks, removedINodes, true); parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE), true); removed = counts.get(Quota.NAMESPACE); @@ -1467,7 +1461,7 @@ public class FSDirectory implements Closeable { } } } - for (INode child : targetDir.getChildrenList(null)) { + for (INode child : targetDir.getChildrenList(Snapshot.CURRENT_STATE_ID)) { checkSnapshot(child, snapshottableDirs); } } @@ -1491,7 +1485,7 @@ public class FSDirectory implements Closeable { return getSnapshotsListing(srcs, startAfter); } final INodesInPath inodesInPath = 
rootDir.getLastINodeInPath(srcs, true); - final Snapshot snapshot = inodesInPath.getPathSnapshot(); + final int snapshot = inodesInPath.getPathSnapshotId(); final INode targetNode = inodesInPath.getINode(0); if (targetNode == null) return null; @@ -1543,7 +1537,8 @@ public class FSDirectory implements Closeable { final HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing]; for (int i = 0; i < numOfListing; i++) { Root sRoot = snapshots.get(i + skipSize).getRoot(); - listing[i] = createFileStatus(sRoot.getLocalNameBytes(), sRoot, null); + listing[i] = createFileStatus(sRoot.getLocalNameBytes(), sRoot, + Snapshot.CURRENT_STATE_ID); } return new DirectoryListing( listing, snapshots.size() - skipSize - numOfListing); @@ -1566,7 +1561,7 @@ public class FSDirectory implements Closeable { final INodesInPath inodesInPath = rootDir.getLastINodeInPath(srcs, resolveLink); final INode i = inodesInPath.getINode(0); return i == null? null: createFileStatus(HdfsFileStatus.EMPTY_NAME, i, - inodesInPath.getPathSnapshot()); + inodesInPath.getPathSnapshotId()); } finally { readUnlock(); } @@ -2129,7 +2124,7 @@ public class FSDirectory implements Closeable { } final INodeDirectory parent = pathComponents[pos-1].asDirectory(); - final int count = parent.getChildrenList(null).size(); + final int count = parent.getChildrenList(Snapshot.CURRENT_STATE_ID).size(); if (count >= maxDirItems) { final MaxDirectoryItemsExceededException e = new MaxDirectoryItemsExceededException(maxDirItems, count); @@ -2193,7 +2188,7 @@ public class FSDirectory implements Closeable { final INodeDirectory parent = inodes[pos-1].asDirectory(); boolean added = false; try { - added = parent.addChild(child, true, iip.getLatestSnapshot()); + added = parent.addChild(child, true, iip.getLatestSnapshotId()); } catch (QuotaExceededException e) { updateCountNoQuotaCheck(iip, pos, -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE)); @@ -2228,7 +2223,7 @@ public class FSDirectory implements Closeable { */ 
private long removeLastINode(final INodesInPath iip) throws QuotaExceededException { - final Snapshot latestSnapshot = iip.getLatestSnapshot(); + final int latestSnapshot = iip.getLatestSnapshotId(); final INode last = iip.getLastINode(); final INodeDirectory parent = iip.getINode(-2).asDirectory(); if (!parent.removeChild(last, latestSnapshot)) { @@ -2382,7 +2377,7 @@ public class FSDirectory implements Closeable { return null; } - final Snapshot latest = iip.getLatestSnapshot(); + final int latest = iip.getLatestSnapshotId(); dirNode = dirNode.recordModification(latest); dirNode.setQuota(nsQuota, dsQuota); return dirNode; @@ -2425,11 +2420,11 @@ public class FSDirectory implements Closeable { * Sets the access time on the file/directory. Logs it in the transaction log. */ void setTimes(String src, INode inode, long mtime, long atime, boolean force, - Snapshot latest) throws QuotaExceededException { + int latestSnapshotId) throws QuotaExceededException { boolean status = false; writeLock(); try { - status = unprotectedSetTimes(inode, mtime, atime, force, latest); + status = unprotectedSetTimes(inode, mtime, atime, force, latestSnapshotId); } finally { writeUnlock(); } @@ -2443,11 +2438,11 @@ public class FSDirectory implements Closeable { assert hasWriteLock(); final INodesInPath i = getLastINodeInPath(src); return unprotectedSetTimes(i.getLastINode(), mtime, atime, force, - i.getLatestSnapshot()); + i.getLatestSnapshotId()); } private boolean unprotectedSetTimes(INode inode, long mtime, - long atime, boolean force, Snapshot latest) throws QuotaExceededException { + long atime, boolean force, int latest) throws QuotaExceededException { assert hasWriteLock(); boolean status = false; if (mtime != -1) { @@ -2455,7 +2450,7 @@ public class FSDirectory implements Closeable { status = true; } if (atime != -1) { - long inodeTime = inode.getAccessTime(null); + long inodeTime = inode.getAccessTime(); // if the last access time update was within the last precision interval, 
then // no need to store access time @@ -2495,7 +2490,7 @@ public class FSDirectory implements Closeable { * @throws IOException if any error occurs */ private HdfsFileStatus createFileStatus(byte[] path, INode node, - boolean needLocation, Snapshot snapshot) throws IOException { + boolean needLocation, int snapshot) throws IOException { if (needLocation) { return createLocatedFileStatus(path, node, snapshot); } else { @@ -2506,7 +2501,7 @@ public class FSDirectory implements Closeable { * Create FileStatus by file INode */ HdfsFileStatus createFileStatus(byte[] path, INode node, - Snapshot snapshot) { + int snapshot) { long size = 0; // length is zero for directories short replication = 0; long blocksize = 0; @@ -2539,7 +2534,7 @@ public class FSDirectory implements Closeable { * Create FileStatus with location info by file INode */ private HdfsLocatedFileStatus createLocatedFileStatus(byte[] path, - INode node, Snapshot snapshot) throws IOException { + INode node, int snapshot) throws IOException { assert hasReadLock(); long size = 0; // length is zero for directories short replication = 0; @@ -2551,7 +2546,7 @@ public class FSDirectory implements Closeable { replication = fileNode.getFileReplication(snapshot); blocksize = fileNode.getPreferredBlockSize(); - final boolean inSnapshot = snapshot != null; + final boolean inSnapshot = snapshot != Snapshot.CURRENT_STATE_ID; final boolean isUc = inSnapshot ? false : fileNode.isUnderConstruction(); final long fileSize = !inSnapshot && isUc ? 
fileNode.computeFileSizeNotIncludingLastUcBlock() : size; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index c0d1032d605..634b509cb53 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -24,14 +24,12 @@ import java.io.IOException; import java.io.InputStream; import java.util.Arrays; import java.util.EnumMap; -import java.util.EnumSet; import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.fs.CacheFlag; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants; @@ -79,6 +77,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease; +import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter; @@ -327,7 +326,7 @@ public class FSEditLogLoader { // add the op into retry cache if necessary if (toAddRetryCache) { HdfsFileStatus stat = fsNamesys.dir.createFileStatus( - HdfsFileStatus.EMPTY_NAME, newFile, null); + HdfsFileStatus.EMPTY_NAME, newFile, 
Snapshot.CURRENT_STATE_ID); fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId, addCloseOp.rpcCallId, stat); } @@ -340,7 +339,7 @@ public class FSEditLogLoader { } LocatedBlock lb = fsNamesys.prepareFileForWrite(addCloseOp.path, oldFile, addCloseOp.clientName, addCloseOp.clientMachine, null, - false, iip.getLatestSnapshot(), false); + false, iip.getLatestSnapshotId(), false); newFile = INodeFile.valueOf(fsDir.getINode(addCloseOp.path), addCloseOp.path, true); @@ -356,8 +355,8 @@ public class FSEditLogLoader { // update the block list. // Update the salient file attributes. - newFile.setAccessTime(addCloseOp.atime, null); - newFile.setModificationTime(addCloseOp.mtime, null); + newFile.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID); + newFile.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID); updateBlocks(fsDir, addCloseOp, newFile); break; } @@ -375,8 +374,8 @@ public class FSEditLogLoader { final INodeFile file = INodeFile.valueOf(iip.getINode(0), addCloseOp.path); // Update the salient file attributes. 
- file.setAccessTime(addCloseOp.atime, null); - file.setModificationTime(addCloseOp.mtime, null); + file.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID); + file.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID); updateBlocks(fsDir, addCloseOp, file); // Now close the file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index 14764e006ec..cc4ca0c7772 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.server.common.Util; import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile; +import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand; @@ -770,7 +771,7 @@ public class FSImage implements Closeable { dir.computeQuotaUsage4CurrentDirectory(counts); - for (INode child : dir.getChildrenList(null)) { + for (INode child : dir.getChildrenList(Snapshot.CURRENT_STATE_ID)) { if (child.isDirectory()) { updateCountForQuotaRecursively(child.asDirectory(), counts); } else { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java index fe2929571bc..5c2ca578ad4 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java @@ -1111,7 +1111,8 @@ public class FSImageFormat { return; } - final ReadOnlyList children = current.getChildrenList(null); + final ReadOnlyList children = current + .getChildrenList(Snapshot.CURRENT_STATE_ID); int dirNum = 0; List snapshotDirs = null; DirectoryWithSnapshotFeature sf = current.getDirectoryWithSnapshotFeature(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 101eecec6f6..600f0fbb261 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -176,7 +176,15 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager; -import org.apache.hadoop.hdfs.server.blockmanagement.*; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics; +import 
org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; +import org.apache.hadoop.hdfs.server.blockmanagement.OutOfV1GenerationStampsException; import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; @@ -1617,11 +1625,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats, if (isReadOp) { continue; } - dir.setTimes(src, inode, -1, now, false, iip.getLatestSnapshot()); + dir.setTimes(src, inode, -1, now, false, iip.getLatestSnapshotId()); } } final long fileSize = iip.isSnapshot() ? - inode.computeFileSize(iip.getPathSnapshot()) + inode.computeFileSize(iip.getPathSnapshotId()) : inode.computeFileSizeNotIncludingLastUcBlock(); boolean isUc = inode.isUnderConstruction(); if (iip.isSnapshot()) { @@ -1869,7 +1877,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, final INodesInPath iip = dir.getINodesInPath4Write(src); final INode inode = iip.getLastINode(); if (inode != null) { - dir.setTimes(src, inode, mtime, atime, true, iip.getLatestSnapshot()); + dir.setTimes(src, inode, mtime, atime, true, iip.getLatestSnapshotId()); resultingStat = getAuditFileInfo(src, false); } else { throw new FileNotFoundException("File/Directory " + src + " does not exist."); @@ -2265,7 +2273,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, final DatanodeDescriptor clientNode = blockManager.getDatanodeManager().getDatanodeByHost(clientMachine); return prepareFileForWrite(src, myFile, holder, clientMachine, clientNode, - true, iip.getLatestSnapshot(), logRetryCache); + true, iip.getLatestSnapshotId(), logRetryCache); } catch (IOException ie) { NameNode.stateChangeLog.warn("DIR* NameSystem.append: " +ie.getMessage()); throw ie; @@ -2290,7 +2298,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, */ LocatedBlock prepareFileForWrite(String src, INodeFile 
file, String leaseHolder, String clientMachine, DatanodeDescriptor clientNode, - boolean writeToEditLog, Snapshot latestSnapshot, boolean logRetryCache) + boolean writeToEditLog, int latestSnapshot, boolean logRetryCache) throws IOException { file = file.recordModification(latestSnapshot); final INodeFile cons = file.toUnderConstruction(leaseHolder, clientMachine, @@ -2925,7 +2933,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, } finalizeINodeFileUnderConstruction(src, pendingFile, - iip.getLatestSnapshot()); + iip.getLatestSnapshotId()); return true; } @@ -3634,7 +3642,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, // then reap lease immediately and close the file. if(nrCompleteBlocks == nrBlocks) { finalizeINodeFileUnderConstruction(src, pendingFile, - iip.getLatestSnapshot()); + iip.getLatestSnapshotId()); NameNode.stateChangeLog.warn("BLOCK*" + " internalReleaseLease: All existing blocks are COMPLETE," + " lease removed, file closed."); @@ -3683,7 +3691,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, if(penultimateBlockMinReplication && blockManager.checkMinReplication(lastBlock)) { finalizeINodeFileUnderConstruction(src, pendingFile, - iip.getLatestSnapshot()); + iip.getLatestSnapshotId()); NameNode.stateChangeLog.warn("BLOCK*" + " internalReleaseLease: Committed blocks are minimally replicated," + " lease removed, file closed."); @@ -3714,7 +3722,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, // We can remove this block and close the file. 
pendingFile.removeLastBlock(lastBlock); finalizeINodeFileUnderConstruction(src, pendingFile, - iip.getLatestSnapshot()); + iip.getLatestSnapshotId()); NameNode.stateChangeLog.warn("BLOCK* internalReleaseLease: " + "Removed empty last block and closed file."); return true; @@ -3775,7 +3783,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, } private void finalizeINodeFileUnderConstruction(String src, - INodeFile pendingFile, Snapshot latestSnapshot) throws IOException, + INodeFile pendingFile, int latestSnapshot) throws IOException, UnresolvedLinkException { assert hasWriteLock(); FileUnderConstructionFeature uc = pendingFile.getFileUnderConstructionFeature(); @@ -3975,7 +3983,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, //remove lease, close file finalizeINodeFileUnderConstruction(src, pendingFile, - Snapshot.findLatestSnapshot(pendingFile, null)); + Snapshot.findLatestSnapshot(pendingFile, Snapshot.CURRENT_STATE_ID)); return src; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java index 17b43239c83..098fc17393a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import java.io.IOException; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; @@ -29,7 +28,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import 
org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; @@ -142,54 +140,54 @@ class FSPermissionChecker { // check if (parentAccess != null) && file exists, then check sb // If resolveLink, the check is performed on the link target. final INodesInPath inodesInPath = root.getINodesInPath(path, resolveLink); - final Snapshot snapshot = inodesInPath.getPathSnapshot(); + final int snapshotId = inodesInPath.getPathSnapshotId(); final INode[] inodes = inodesInPath.getINodes(); int ancestorIndex = inodes.length - 2; for(; ancestorIndex >= 0 && inodes[ancestorIndex] == null; ancestorIndex--); - checkTraverse(inodes, ancestorIndex, snapshot); + checkTraverse(inodes, ancestorIndex, snapshotId); final INode last = inodes[inodes.length - 1]; if (parentAccess != null && parentAccess.implies(FsAction.WRITE) && inodes.length > 1 && last != null) { - checkStickyBit(inodes[inodes.length - 2], last, snapshot); + checkStickyBit(inodes[inodes.length - 2], last, snapshotId); } if (ancestorAccess != null && inodes.length > 1) { - check(inodes, ancestorIndex, snapshot, ancestorAccess); + check(inodes, ancestorIndex, snapshotId, ancestorAccess); } if (parentAccess != null && inodes.length > 1) { - check(inodes, inodes.length - 2, snapshot, parentAccess); + check(inodes, inodes.length - 2, snapshotId, parentAccess); } if (access != null) { - check(last, snapshot, access); + check(last, snapshotId, access); } if (subAccess != null) { - checkSubAccess(last, snapshot, subAccess); + checkSubAccess(last, snapshotId, subAccess); } if (doCheckOwner) { - checkOwner(last, snapshot); + checkOwner(last, snapshotId); } } /** Guarded by {@link FSNamesystem#readLock()} */ - private void checkOwner(INode inode, Snapshot snapshot + private void checkOwner(INode inode, int snapshotId ) throws AccessControlException { - if (inode != null && user.equals(inode.getUserName(snapshot))) { + if (inode != null && user.equals(inode.getUserName(snapshotId))) { 
return; } throw new AccessControlException("Permission denied"); } /** Guarded by {@link FSNamesystem#readLock()} */ - private void checkTraverse(INode[] inodes, int last, Snapshot snapshot + private void checkTraverse(INode[] inodes, int last, int snapshotId ) throws AccessControlException { for(int j = 0; j <= last; j++) { - check(inodes[j], snapshot, FsAction.EXECUTE); + check(inodes[j], snapshotId, FsAction.EXECUTE); } } /** Guarded by {@link FSNamesystem#readLock()} */ - private void checkSubAccess(INode inode, Snapshot snapshot, FsAction access + private void checkSubAccess(INode inode, int snapshotId, FsAction access ) throws AccessControlException { if (inode == null || !inode.isDirectory()) { return; @@ -198,9 +196,9 @@ class FSPermissionChecker { Stack directories = new Stack(); for(directories.push(inode.asDirectory()); !directories.isEmpty(); ) { INodeDirectory d = directories.pop(); - check(d, snapshot, access); + check(d, snapshotId, access); - for(INode child : d.getChildrenList(snapshot)) { + for(INode child : d.getChildrenList(snapshotId)) { if (child.isDirectory()) { directories.push(child.asDirectory()); } @@ -209,23 +207,23 @@ class FSPermissionChecker { } /** Guarded by {@link FSNamesystem#readLock()} */ - private void check(INode[] inodes, int i, Snapshot snapshot, FsAction access + private void check(INode[] inodes, int i, int snapshotId, FsAction access ) throws AccessControlException { - check(i >= 0? inodes[i]: null, snapshot, access); + check(i >= 0? 
inodes[i]: null, snapshotId, access); } /** Guarded by {@link FSNamesystem#readLock()} */ - private void check(INode inode, Snapshot snapshot, FsAction access + private void check(INode inode, int snapshotId, FsAction access ) throws AccessControlException { if (inode == null) { return; } - FsPermission mode = inode.getFsPermission(snapshot); + FsPermission mode = inode.getFsPermission(snapshotId); - if (user.equals(inode.getUserName(snapshot))) { //user class + if (user.equals(inode.getUserName(snapshotId))) { //user class if (mode.getUserAction().implies(access)) { return; } } - else if (groups.contains(inode.getGroupName(snapshot))) { //group class + else if (groups.contains(inode.getGroupName(snapshotId))) { //group class if (mode.getGroupAction().implies(access)) { return; } } else { //other class @@ -236,19 +234,19 @@ class FSPermissionChecker { } /** Guarded by {@link FSNamesystem#readLock()} */ - private void checkStickyBit(INode parent, INode inode, Snapshot snapshot + private void checkStickyBit(INode parent, INode inode, int snapshotId ) throws AccessControlException { - if(!parent.getFsPermission(snapshot).getStickyBit()) { + if(!parent.getFsPermission(snapshotId).getStickyBit()) { return; } // If this user is the directory owner, return - if(parent.getUserName(snapshot).equals(user)) { + if(parent.getUserName(snapshotId).equals(user)) { return; } // if this user is the file owner, return - if(inode.getUserName(snapshot).equals(user)) { + if(inode.getUserName(snapshotId).equals(user)) { return; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java index 1c87d2f4255..5efb2a7e04e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java @@ -70,98 
+70,101 @@ public abstract class INode implements INodeAttributes, Diff.Element { } /** Get the {@link PermissionStatus} */ - abstract PermissionStatus getPermissionStatus(Snapshot snapshot); + abstract PermissionStatus getPermissionStatus(int snapshotId); /** The same as getPermissionStatus(null). */ final PermissionStatus getPermissionStatus() { - return getPermissionStatus(null); + return getPermissionStatus(Snapshot.CURRENT_STATE_ID); } /** - * @param snapshot - * if it is not null, get the result from the given snapshot; - * otherwise, get the result from the current inode. + * @param snapshotId + * if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result + * from the given snapshot; otherwise, get the result from the + * current inode. * @return user name */ - abstract String getUserName(Snapshot snapshot); + abstract String getUserName(int snapshotId); - /** The same as getUserName(null). */ + /** The same as getUserName(Snapshot.CURRENT_STATE_ID). */ @Override public final String getUserName() { - return getUserName(null); + return getUserName(Snapshot.CURRENT_STATE_ID); } /** Set user */ abstract void setUser(String user); /** Set user */ - final INode setUser(String user, Snapshot latest) + final INode setUser(String user, int latestSnapshotId) throws QuotaExceededException { - final INode nodeToUpdate = recordModification(latest); + final INode nodeToUpdate = recordModification(latestSnapshotId); nodeToUpdate.setUser(user); return nodeToUpdate; } /** - * @param snapshot - * if it is not null, get the result from the given snapshot; - * otherwise, get the result from the current inode. + * @param snapshotId + * if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result + * from the given snapshot; otherwise, get the result from the + * current inode. * @return group name */ - abstract String getGroupName(Snapshot snapshot); + abstract String getGroupName(int snapshotId); - /** The same as getGroupName(null). 
*/ + /** The same as getGroupName(Snapshot.CURRENT_STATE_ID). */ @Override public final String getGroupName() { - return getGroupName(null); + return getGroupName(Snapshot.CURRENT_STATE_ID); } /** Set group */ abstract void setGroup(String group); /** Set group */ - final INode setGroup(String group, Snapshot latest) + final INode setGroup(String group, int latestSnapshotId) throws QuotaExceededException { - final INode nodeToUpdate = recordModification(latest); + final INode nodeToUpdate = recordModification(latestSnapshotId); nodeToUpdate.setGroup(group); return nodeToUpdate; } /** - * @param snapshot - * if it is not null, get the result from the given snapshot; - * otherwise, get the result from the current inode. + * @param snapshotId + * if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result + * from the given snapshot; otherwise, get the result from the + * current inode. * @return permission. */ - abstract FsPermission getFsPermission(Snapshot snapshot); + abstract FsPermission getFsPermission(int snapshotId); - /** The same as getFsPermission(null). */ + /** The same as getFsPermission(Snapshot.CURRENT_STATE_ID). */ @Override public final FsPermission getFsPermission() { - return getFsPermission(null); + return getFsPermission(Snapshot.CURRENT_STATE_ID); } /** Set the {@link FsPermission} of this {@link INode} */ abstract void setPermission(FsPermission permission); /** Set the {@link FsPermission} of this {@link INode} */ - INode setPermission(FsPermission permission, Snapshot latest) + INode setPermission(FsPermission permission, int latestSnapshotId) throws QuotaExceededException { - final INode nodeToUpdate = recordModification(latest); + final INode nodeToUpdate = recordModification(latestSnapshotId); nodeToUpdate.setPermission(permission); return nodeToUpdate; } /** - * @return if the given snapshot is null, return this; - * otherwise return the corresponding snapshot inode. 
+ * @return if the given snapshot id is {@link Snapshot#CURRENT_STATE_ID}, + * return this; otherwise return the corresponding snapshot inode. */ - public INodeAttributes getSnapshotINode(final Snapshot snapshot) { + public INodeAttributes getSnapshotINode(final int snapshotId) { return this; } /** Is this inode in the latest snapshot? */ - public final boolean isInLatestSnapshot(final Snapshot latest) { - if (latest == null) { + public final boolean isInLatestSnapshot(final int latestSnapshotId) { + if (latestSnapshotId == Snapshot.CURRENT_STATE_ID) { return false; } // if parent is a reference node, parent must be a renamed node. We can @@ -173,10 +176,11 @@ public abstract class INode implements INodeAttributes, Diff.Element { if (parentDir == null) { // root return true; } - if (!parentDir.isInLatestSnapshot(latest)) { + if (!parentDir.isInLatestSnapshot(latestSnapshotId)) { return false; } - final INode child = parentDir.getChild(getLocalNameBytes(), latest); + final INode child = parentDir.getChild(getLocalNameBytes(), + latestSnapshotId); if (this == child) { return true; } @@ -203,21 +207,22 @@ public abstract class INode implements INodeAttributes, Diff.Element { * operation, or the snapshot belonging to the DST tree. * * @param latestInDst - * the latest snapshot in the DST tree above the reference node + * id of the latest snapshot in the DST tree above the reference node * @return True: the modification should be recorded in the snapshot that * belongs to the SRC tree. False: the modification should be * recorded in the snapshot that belongs to the DST tree. 
*/ - public final boolean shouldRecordInSrcSnapshot(final Snapshot latestInDst) { + public final boolean shouldRecordInSrcSnapshot(final int latestInDst) { Preconditions.checkState(!isReference()); - if (latestInDst == null) { + if (latestInDst == Snapshot.CURRENT_STATE_ID) { return true; } INodeReference withCount = getParentReference(); if (withCount != null) { int dstSnapshotId = withCount.getParentReference().getDstSnapshotId(); - if (dstSnapshotId >= latestInDst.getId()) { + if (dstSnapshotId != Snapshot.CURRENT_STATE_ID + && dstSnapshotId >= latestInDst) { return true; } } @@ -228,13 +233,14 @@ public abstract class INode implements INodeAttributes, Diff.Element { * This inode is being modified. The previous version of the inode needs to * be recorded in the latest snapshot. * - * @param latest the latest snapshot that has been taken. - * Note that it is null if no snapshots have been taken. + * @param latestSnapshotId The id of the latest snapshot that has been taken. + * Note that it is {@link Snapshot#CURRENT_STATE_ID} + * if no snapshots have been taken. * @return The current inode, which usually is the same object of this inode. * However, in some cases, this inode may be replaced with a new inode * for maintaining snapshots. The current inode is then the new inode. */ - abstract INode recordModification(final Snapshot latest) + abstract INode recordModification(final int latestSnapshotId) throws QuotaExceededException; /** Check whether it's a reference. */ @@ -330,12 +336,13 @@ public abstract class INode implements INodeAttributes, Diff.Element { * snapshot in its diff list. Recursively clean its children. * * - * @param snapshot - * The snapshot to delete. Null means to delete the current + * @param snapshotId + * The id of the snapshot to delete. + * {@link Snapshot#CURRENT_STATE_ID} means to delete the current * file/directory. - * @param prior - * The latest snapshot before the to-be-deleted snapshot. 
When - * deleting a current inode, this parameter captures the latest + * @param priorSnapshotId + * The id of the latest snapshot before the to-be-deleted snapshot. + * When deleting a current inode, this parameter captures the latest * snapshot. * @param collectedBlocks * blocks collected from the descents for further block @@ -345,8 +352,8 @@ public abstract class INode implements INodeAttributes, Diff.Element { * inodeMap * @return quota usage delta when deleting a snapshot */ - public abstract Quota.Counts cleanSubtree(final Snapshot snapshot, - Snapshot prior, BlocksMapUpdateInfo collectedBlocks, + public abstract Quota.Counts cleanSubtree(final int snapshotId, + int priorSnapshotId, BlocksMapUpdateInfo collectedBlocks, List removedINodes, boolean countDiffChange) throws QuotaExceededException; @@ -460,9 +467,10 @@ public abstract class INode implements INodeAttributes, Diff.Element { * @param counts The subtree counts for returning. * @param useCache Whether to use cached quota usage. Note that * {@link WithName} node never uses cache for its subtree. - * @param lastSnapshotId {@link Snapshot#INVALID_ID} indicates the computation - * is in the current tree. Otherwise the id indicates - * the computation range for a {@link WithName} node. + * @param lastSnapshotId {@link Snapshot#CURRENT_STATE_ID} indicates the + * computation is in the current tree. Otherwise the id + * indicates the computation range for a + * {@link WithName} node. * @return The same objects as the counts parameter. 
*/ public abstract Quota.Counts computeQuotaUsage(Quota.Counts counts, @@ -470,7 +478,7 @@ public abstract class INode implements INodeAttributes, Diff.Element { public final Quota.Counts computeQuotaUsage(Quota.Counts counts, boolean useCache) { - return computeQuotaUsage(counts, useCache, Snapshot.INVALID_ID); + return computeQuotaUsage(counts, useCache, Snapshot.CURRENT_STATE_ID); } /** @@ -558,21 +566,22 @@ public abstract class INode implements INodeAttributes, Diff.Element { } /** - * @param snapshot - * if it is not null, get the result from the given snapshot; - * otherwise, get the result from the current inode. + * @param snapshotId + * if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result + * from the given snapshot; otherwise, get the result from the + * current inode. * @return modification time. */ - abstract long getModificationTime(Snapshot snapshot); + abstract long getModificationTime(int snapshotId); - /** The same as getModificationTime(null). */ + /** The same as getModificationTime(Snapshot.CURRENT_STATE_ID). */ @Override public final long getModificationTime() { - return getModificationTime(null); + return getModificationTime(Snapshot.CURRENT_STATE_ID); } /** Update modification time if it is larger than the current value. */ - public abstract INode updateModificationTime(long mtime, Snapshot latest) + public abstract INode updateModificationTime(long mtime, int latestSnapshotId) throws QuotaExceededException; /** Set the last modification time of inode. */ @@ -580,24 +589,25 @@ public abstract class INode implements INodeAttributes, Diff.Element { /** Set the last modification time of inode. 
*/ public final INode setModificationTime(long modificationTime, - Snapshot latest) throws QuotaExceededException { - final INode nodeToUpdate = recordModification(latest); + int latestSnapshotId) throws QuotaExceededException { + final INode nodeToUpdate = recordModification(latestSnapshotId); nodeToUpdate.setModificationTime(modificationTime); return nodeToUpdate; } /** - * @param snapshot - * if it is not null, get the result from the given snapshot; - * otherwise, get the result from the current inode. + * @param snapshotId + * if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result + * from the given snapshot; otherwise, get the result from the + * current inode. * @return access time */ - abstract long getAccessTime(Snapshot snapshot); + abstract long getAccessTime(int snapshotId); - /** The same as getAccessTime(null). */ + /** The same as getAccessTime(Snapshot.CURRENT_STATE_ID). */ @Override public final long getAccessTime() { - return getAccessTime(null); + return getAccessTime(Snapshot.CURRENT_STATE_ID); } /** @@ -608,9 +618,9 @@ public abstract class INode implements INodeAttributes, Diff.Element { /** * Set last access time of inode. 
*/ - public final INode setAccessTime(long accessTime, Snapshot latest) + public final INode setAccessTime(long accessTime, int latestSnapshotId) throws QuotaExceededException { - final INode nodeToUpdate = recordModification(latest); + final INode nodeToUpdate = recordModification(latestSnapshotId); nodeToUpdate.setAccessTime(accessTime); return nodeToUpdate; } @@ -679,13 +689,15 @@ public abstract class INode implements INodeAttributes, Diff.Element { @VisibleForTesting public final StringBuffer dumpTreeRecursively() { final StringWriter out = new StringWriter(); - dumpTreeRecursively(new PrintWriter(out, true), new StringBuilder(), null); + dumpTreeRecursively(new PrintWriter(out, true), new StringBuilder(), + Snapshot.CURRENT_STATE_ID); return out.getBuffer(); } @VisibleForTesting public final void dumpTreeRecursively(PrintStream out) { - dumpTreeRecursively(new PrintWriter(out, true), new StringBuilder(), null); + dumpTreeRecursively(new PrintWriter(out, true), new StringBuilder(), + Snapshot.CURRENT_STATE_ID); } /** @@ -694,7 +706,7 @@ public abstract class INode implements INodeAttributes, Diff.Element { */ @VisibleForTesting public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix, - Snapshot snapshot) { + int snapshotId) { out.print(prefix); out.print(" "); final String name = getLocalName(); @@ -703,7 +715,7 @@ public abstract class INode implements INodeAttributes, Diff.Element { out.print(getObjectString()); out.print("), "); out.print(getParentString()); - out.print(", " + getPermissionStatus(snapshot)); + out.print(", " + getPermissionStatus(snapshotId)); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java index a52ae3e72f4..83cb0a4eb94 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java @@ -204,9 +204,9 @@ public class INodeDirectory extends INodeWithAdditionalFields } @Override - public INodeDirectoryAttributes getSnapshotINode(Snapshot snapshot) { + public INodeDirectoryAttributes getSnapshotINode(int snapshotId) { DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature(); - return sf == null ? this : sf.getDiffs().getSnapshotINode(snapshot, this); + return sf == null ? this : sf.getDiffs().getSnapshotINode(snapshotId, this); } @Override @@ -217,12 +217,13 @@ public class INodeDirectory extends INodeWithAdditionalFields /** Replace itself with an {@link INodeDirectorySnapshottable}. */ public INodeDirectorySnapshottable replaceSelf4INodeDirectorySnapshottable( - Snapshot latest, final INodeMap inodeMap) throws QuotaExceededException { + int latestSnapshotId, final INodeMap inodeMap) + throws QuotaExceededException { Preconditions.checkState(!(this instanceof INodeDirectorySnapshottable), "this is already an INodeDirectorySnapshottable, this=%s", this); final INodeDirectorySnapshottable s = new INodeDirectorySnapshottable(this); replaceSelf(s, inodeMap).getDirectoryWithSnapshotFeature().getDiffs() - .saveSelf2Snapshot(latest, s, this); + .saveSelf2Snapshot(latestSnapshotId, s, this); return s; } @@ -289,8 +290,8 @@ public class INodeDirectory extends INodeWithAdditionalFields } INodeReference.WithName replaceChild4ReferenceWithName(INode oldChild, - Snapshot latest) { - Preconditions.checkArgument(latest != null); + int latestSnapshotId) { + Preconditions.checkArgument(latestSnapshotId != Snapshot.CURRENT_STATE_ID); if (oldChild instanceof INodeReference.WithName) { return (INodeReference.WithName)oldChild; } @@ -304,22 +305,23 @@ public class INodeDirectory extends INodeWithAdditionalFields withCount = new INodeReference.WithCount(null, oldChild); } final INodeReference.WithName ref = new INodeReference.WithName(this, - withCount, 
oldChild.getLocalNameBytes(), latest.getId()); + withCount, oldChild.getLocalNameBytes(), latestSnapshotId); replaceChild(oldChild, ref, null); return ref; } @Override - public INodeDirectory recordModification(Snapshot latest) + public INodeDirectory recordModification(int latestSnapshotId) throws QuotaExceededException { - if (isInLatestSnapshot(latest) && !shouldRecordInSrcSnapshot(latest)) { + if (isInLatestSnapshot(latestSnapshotId) + && !shouldRecordInSrcSnapshot(latestSnapshotId)) { // add snapshot feature if necessary DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature(); if (sf == null) { sf = addSnapshotFeature(null); } // record self in the diff list if necessary - sf.getDiffs().saveSelf2Snapshot(latest, this, null); + sf.getDiffs().saveSelf2Snapshot(latestSnapshotId, this, null); } return this; } @@ -329,9 +331,9 @@ public class INodeDirectory extends INodeWithAdditionalFields * * @return the child inode, which may be replaced. */ - public INode saveChild2Snapshot(final INode child, final Snapshot latest, + public INode saveChild2Snapshot(final INode child, final int latestSnapshotId, final INode snapshotCopy) throws QuotaExceededException { - if (latest == null) { + if (latestSnapshotId == Snapshot.CURRENT_STATE_ID) { return child; } @@ -340,42 +342,45 @@ public class INodeDirectory extends INodeWithAdditionalFields if (sf == null) { sf = this.addSnapshotFeature(null); } - return sf.saveChild2Snapshot(this, child, latest, snapshotCopy); + return sf.saveChild2Snapshot(this, child, latestSnapshotId, snapshotCopy); } /** * @param name the name of the child - * @param snapshot - * if it is not null, get the result from the given snapshot; - * otherwise, get the result from the current directory. + * @param snapshotId + * if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result + * from the corresponding snapshot; otherwise, get the result from + * the current directory. * @return the child inode. 
*/ - public INode getChild(byte[] name, Snapshot snapshot) { + public INode getChild(byte[] name, int snapshotId) { DirectoryWithSnapshotFeature sf; - if (snapshot == null || (sf = getDirectoryWithSnapshotFeature()) == null) { + if (snapshotId == Snapshot.CURRENT_STATE_ID || + (sf = getDirectoryWithSnapshotFeature()) == null) { ReadOnlyList c = getCurrentChildrenList(); final int i = ReadOnlyList.Util.binarySearch(c, name); return i < 0 ? null : c.get(i); } - return sf.getChild(this, name, snapshot); + return sf.getChild(this, name, snapshotId); } /** - * @param snapshot - * if it is not null, get the result from the given snapshot; - * otherwise, get the result from the current directory. + * @param snapshotId + * if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result + * from the corresponding snapshot; otherwise, get the result from + * the current directory. * @return the current children list if the specified snapshot is null; * otherwise, return the children list corresponding to the snapshot. * Note that the returned list is never null. */ - public ReadOnlyList getChildrenList(final Snapshot snapshot) { + public ReadOnlyList getChildrenList(final int snapshotId) { DirectoryWithSnapshotFeature sf; - if (snapshot == null + if (snapshotId == Snapshot.CURRENT_STATE_ID || (sf = this.getDirectoryWithSnapshotFeature()) == null) { return getCurrentChildrenList(); } - return sf.getChildrenList(this, snapshot); + return sf.getChildrenList(this, snapshotId); } private ReadOnlyList getCurrentChildrenList() { @@ -450,15 +455,15 @@ public class INodeDirectory extends INodeWithAdditionalFields /** * Remove the specified child from this directory. 
*/ - public boolean removeChild(INode child, Snapshot latest) + public boolean removeChild(INode child, int latestSnapshotId) throws QuotaExceededException { - if (isInLatestSnapshot(latest)) { + if (isInLatestSnapshot(latestSnapshotId)) { // create snapshot feature if necessary DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature(); if (sf == null) { sf = this.addSnapshotFeature(null); } - return sf.removeChild(this, child, latest); + return sf.removeChild(this, child, latestSnapshotId); } return removeChild(child); } @@ -493,24 +498,24 @@ public class INodeDirectory extends INodeWithAdditionalFields * otherwise, return true; */ public boolean addChild(INode node, final boolean setModTime, - final Snapshot latest) throws QuotaExceededException { + final int latestSnapshotId) throws QuotaExceededException { final int low = searchChildren(node.getLocalNameBytes()); if (low >= 0) { return false; } - if (isInLatestSnapshot(latest)) { + if (isInLatestSnapshot(latestSnapshotId)) { // create snapshot feature if necessary DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature(); if (sf == null) { sf = this.addSnapshotFeature(null); } - return sf.addChild(this, node, setModTime, latest); + return sf.addChild(this, node, setModTime, latestSnapshotId); } addChild(node, low); if (setModTime) { // update modification time of the parent directory - updateModificationTime(node.getModificationTime(), latest); + updateModificationTime(node.getModificationTime(), latestSnapshotId); } return true; } @@ -548,10 +553,9 @@ public class INodeDirectory extends INodeWithAdditionalFields // we are computing the quota usage for a specific snapshot here, i.e., the // computation only includes files/directories that exist at the time of the // given snapshot - if (sf != null && lastSnapshotId != Snapshot.INVALID_ID + if (sf != null && lastSnapshotId != Snapshot.CURRENT_STATE_ID && !(useCache && isQuotaSet())) { - Snapshot lastSnapshot = 
sf.getDiffs().getSnapshotById(lastSnapshotId); - ReadOnlyList childrenList = getChildrenList(lastSnapshot); + ReadOnlyList childrenList = getChildrenList(lastSnapshotId); for (INode child : childrenList) { child.computeQuotaUsage(counts, useCache, lastSnapshotId); } @@ -607,7 +611,7 @@ public class INodeDirectory extends INodeWithAdditionalFields ContentSummaryComputationContext computeDirectoryContentSummary( ContentSummaryComputationContext summary) { - ReadOnlyList childrenList = getChildrenList(null); + ReadOnlyList childrenList = getChildrenList(Snapshot.CURRENT_STATE_ID); // Explicit traversing is done to enable repositioning after relinquishing // and reacquiring locks. for (int i = 0; i < childrenList.size(); i++) { @@ -629,7 +633,7 @@ public class INodeDirectory extends INodeWithAdditionalFields break; } // Obtain the children list again since it may have been modified. - childrenList = getChildrenList(null); + childrenList = getChildrenList(Snapshot.CURRENT_STATE_ID); // Reposition in case the children list is changed. Decrement by 1 // since it will be incremented when loops. i = nextChild(childrenList, childName) - 1; @@ -668,21 +672,16 @@ public class INodeDirectory extends INodeWithAdditionalFields * The reference node to be removed/replaced * @param newChild * The node to be added back - * @param latestSnapshot - * The latest snapshot. Note this may not be the last snapshot in the - * diff list, since the src tree of the current rename operation - * may be the dst tree of a previous rename. 
* @throws QuotaExceededException should not throw this exception */ public void undoRename4ScrParent(final INodeReference oldChild, - final INode newChild, Snapshot latestSnapshot) - throws QuotaExceededException { + final INode newChild) throws QuotaExceededException { DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature(); Preconditions.checkState(sf != null, "Directory does not have snapshot feature"); sf.getDiffs().removeChild(ListType.DELETED, oldChild); sf.getDiffs().replaceChild(ListType.CREATED, oldChild, newChild); - addChild(newChild, true, null); + addChild(newChild, true, Snapshot.CURRENT_STATE_ID); } /** @@ -691,16 +690,14 @@ public class INodeDirectory extends INodeWithAdditionalFields * and delete possible record in the deleted list. */ public void undoRename4DstParent(final INode deletedChild, - Snapshot latestSnapshot) throws QuotaExceededException { + int latestSnapshotId) throws QuotaExceededException { DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature(); Preconditions.checkState(sf != null, "Directory does not have snapshot feature"); boolean removeDeletedChild = sf.getDiffs().removeChild(ListType.DELETED, deletedChild); - // pass null for inodeMap since the parent node will not get replaced when - // undoing rename - final boolean added = addChild(deletedChild, true, removeDeletedChild ? null - : latestSnapshot); + int sid = removeDeletedChild ? Snapshot.CURRENT_STATE_ID : latestSnapshotId; + final boolean added = addChild(deletedChild, true, sid); // update quota usage if adding is successfully and the old child has not // been stored in deleted list before if (added && !removeDeletedChild) { @@ -722,8 +719,8 @@ public class INodeDirectory extends INodeWithAdditionalFields } /** Call cleanSubtree(..) recursively down the subtree. 
*/ - public Quota.Counts cleanSubtreeRecursively(final Snapshot snapshot, - Snapshot prior, final BlocksMapUpdateInfo collectedBlocks, + public Quota.Counts cleanSubtreeRecursively(final int snapshot, + int prior, final BlocksMapUpdateInfo collectedBlocks, final List removedINodes, final Map excludedNodes, final boolean countDiffChange) throws QuotaExceededException { Quota.Counts counts = Quota.Counts.newInstance(); @@ -732,9 +729,10 @@ public class INodeDirectory extends INodeWithAdditionalFields // to its latest previous snapshot. (besides, we also need to consider nodes // created after prior but before snapshot. this will be done in // DirectoryWithSnapshotFeature) - Snapshot s = snapshot != null && prior != null ? prior : snapshot; + int s = snapshot != Snapshot.CURRENT_STATE_ID + && prior != Snapshot.NO_SNAPSHOT_ID ? prior : snapshot; for (INode child : getChildrenList(s)) { - if (snapshot != null && excludedNodes != null + if (snapshot != Snapshot.CURRENT_STATE_ID && excludedNodes != null && excludedNodes.containsKey(child)) { continue; } else { @@ -753,7 +751,7 @@ public class INodeDirectory extends INodeWithAdditionalFields if (sf != null) { sf.clear(this, collectedBlocks, removedINodes); } - for (INode child : getChildrenList(null)) { + for (INode child : getChildrenList(Snapshot.CURRENT_STATE_ID)) { child.destroyAndCollectBlocks(collectedBlocks, removedINodes); } clear(); @@ -761,18 +759,19 @@ public class INodeDirectory extends INodeWithAdditionalFields } @Override - public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior, + public Quota.Counts cleanSubtree(final int snapshotId, int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks, final List removedINodes, final boolean countDiffChange) throws QuotaExceededException { DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature(); // there is snapshot data if (sf != null) { - return sf.cleanDirectory(this, snapshot, prior, collectedBlocks, - removedINodes, 
countDiffChange); + return sf.cleanDirectory(this, snapshotId, priorSnapshotId, + collectedBlocks, removedINodes, countDiffChange); } // there is no snapshot data - if (prior == null && snapshot == null) { + if (priorSnapshotId == Snapshot.NO_SNAPSHOT_ID + && snapshotId == Snapshot.CURRENT_STATE_ID) { // destroy the whole subtree and collect blocks that should be deleted Quota.Counts counts = Quota.Counts.newInstance(); this.computeQuotaUsage(counts, true); @@ -780,7 +779,7 @@ public class INodeDirectory extends INodeWithAdditionalFields return counts; } else { // process recursively down the subtree - Quota.Counts counts = cleanSubtreeRecursively(snapshot, prior, + Quota.Counts counts = cleanSubtreeRecursively(snapshotId, priorSnapshotId, collectedBlocks, removedINodes, null, countDiffChange); if (isQuotaSet()) { getDirectoryWithQuotaFeature().addSpaceConsumed2Cache( @@ -816,7 +815,7 @@ public class INodeDirectory extends INodeWithAdditionalFields @VisibleForTesting @Override public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix, - final Snapshot snapshot) { + final int snapshot) { super.dumpTreeRecursively(out, prefix, snapshot); out.print(", childrenSize=" + getChildrenList(snapshot).size()); final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature(); @@ -824,7 +823,7 @@ public class INodeDirectory extends INodeWithAdditionalFields out.print(", " + q); } if (this instanceof Snapshot.Root) { - out.print(", snapshotId=" + snapshot.getId()); + out.print(", snapshotId=" + snapshot); } out.println(); @@ -869,7 +868,7 @@ public class INodeDirectory extends INodeWithAdditionalFields for(final Iterator i = subs.iterator(); i.hasNext();) { final SnapshotAndINode pair = i.next(); prefix.append(i.hasNext()? 
DUMPTREE_EXCEPT_LAST_ITEM: DUMPTREE_LAST_ITEM); - pair.inode.dumpTreeRecursively(out, prefix, pair.snapshot); + pair.inode.dumpTreeRecursively(out, prefix, pair.snapshotId); prefix.setLength(prefix.length() - 2); } } @@ -877,20 +876,16 @@ public class INodeDirectory extends INodeWithAdditionalFields /** A pair of Snapshot and INode objects. */ protected static class SnapshotAndINode { - public final Snapshot snapshot; + public final int snapshotId; public final INode inode; - public SnapshotAndINode(Snapshot snapshot, INode inode) { - this.snapshot = snapshot; + public SnapshotAndINode(int snapshot, INode inode) { + this.snapshotId = snapshot; this.inode = inode; } - - public SnapshotAndINode(Snapshot snapshot) { - this(snapshot, snapshot.getRoot()); - } } - public final int getChildrenNum(final Snapshot snapshot) { - return getChildrenList(snapshot).size(); + public final int getChildrenNum(final int snapshotId) { + return getChildrenList(snapshotId).size(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index 1474ec791ca..ab9339f7e6f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID; +import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.NO_SNAPSHOT_ID; + import java.io.FileNotFoundException; import java.io.IOException; import java.io.PrintWriter; @@ -282,26 +285,27 @@ public class INodeFile extends INodeWithAdditionalFields } @Override - public INodeFileAttributes getSnapshotINode(final Snapshot snapshot) { + public INodeFileAttributes 
getSnapshotINode(final int snapshotId) { FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature(); if (sf != null) { - return sf.getDiffs().getSnapshotINode(snapshot, this); + return sf.getDiffs().getSnapshotINode(snapshotId, this); } else { return this; } } @Override - public INodeFile recordModification(final Snapshot latest) + public INodeFile recordModification(final int latestSnapshotId) throws QuotaExceededException { - if (isInLatestSnapshot(latest) && !shouldRecordInSrcSnapshot(latest)) { + if (isInLatestSnapshot(latestSnapshotId) + && !shouldRecordInSrcSnapshot(latestSnapshotId)) { // the file is in snapshot, create a snapshot feature if it does not have FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature(); if (sf == null) { sf = addSnapshotFeature(null); } // record self in the diff list if necessary - sf.getDiffs().saveSelf2Snapshot(latest, this, null); + sf.getDiffs().saveSelf2Snapshot(latestSnapshotId, this, null); } return this; } @@ -317,23 +321,22 @@ public class INodeFile extends INodeWithAdditionalFields /* End of Snapshot Feature */ /** @return the replication factor of the file. */ - public final short getFileReplication(Snapshot snapshot) { - if (snapshot != null) { + public final short getFileReplication(int snapshot) { + if (snapshot != CURRENT_STATE_ID) { return getSnapshotINode(snapshot).getFileReplication(); } - return HeaderFormat.getReplication(header); } /** The same as getFileReplication(null). 
*/ @Override // INodeFileAttributes public final short getFileReplication() { - return getFileReplication(null); + return getFileReplication(CURRENT_STATE_ID); } @Override // BlockCollection public short getBlockReplication() { - short max = getFileReplication(null); + short max = getFileReplication(CURRENT_STATE_ID); FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature(); if (sf != null) { short maxInSnapshot = sf.getMaxBlockRepInDiffs(); @@ -351,9 +354,10 @@ public class INodeFile extends INodeWithAdditionalFields } /** Set the replication factor of this file. */ - public final INodeFile setFileReplication(short replication, Snapshot latest, - final INodeMap inodeMap) throws QuotaExceededException { - final INodeFile nodeToUpdate = recordModification(latest); + public final INodeFile setFileReplication(short replication, + int latestSnapshotId, final INodeMap inodeMap) + throws QuotaExceededException { + final INodeFile nodeToUpdate = recordModification(latestSnapshotId); nodeToUpdate.setFileReplication(replication); return nodeToUpdate; } @@ -431,22 +435,22 @@ public class INodeFile extends INodeWithAdditionalFields } @Override - public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior, + public Quota.Counts cleanSubtree(final int snapshot, int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks, final List removedINodes, final boolean countDiffChange) throws QuotaExceededException { FileWithSnapshotFeature sf = getFileWithSnapshotFeature(); if (sf != null) { - return sf.cleanFile(this, snapshot, prior, collectedBlocks, + return sf.cleanFile(this, snapshot, priorSnapshotId, collectedBlocks, removedINodes, countDiffChange); } Quota.Counts counts = Quota.Counts.newInstance(); - if (snapshot == null && prior == null) { + if (snapshot == CURRENT_STATE_ID && priorSnapshotId == NO_SNAPSHOT_ID) { // this only happens when deleting the current file and the file is not // in any snapshot computeQuotaUsage(counts, false); 
destroyAndCollectBlocks(collectedBlocks, removedINodes); - } else if (snapshot == null && prior != null) { + } else if (snapshot == CURRENT_STATE_ID && priorSnapshotId != NO_SNAPSHOT_ID) { // when deleting the current file and the file is in snapshot, we should // clean the 0-sized block if the file is UC FileUnderConstructionFeature uc = getFileUnderConstructionFeature(); @@ -490,17 +494,18 @@ public class INodeFile extends INodeWithAdditionalFields FileWithSnapshotFeature sf = getFileWithSnapshotFeature(); if (sf != null) { FileDiffList fileDiffList = sf.getDiffs(); - Snapshot last = fileDiffList.getLastSnapshot(); + int last = fileDiffList.getLastSnapshotId(); List diffs = fileDiffList.asList(); - if (lastSnapshotId == Snapshot.INVALID_ID || last == null) { + if (lastSnapshotId == Snapshot.CURRENT_STATE_ID + || last == Snapshot.CURRENT_STATE_ID) { nsDelta += diffs.size(); dsDelta = diskspaceConsumed(); - } else if (last.getId() < lastSnapshotId) { + } else if (last < lastSnapshotId) { dsDelta = computeFileSize(true, false) * getFileReplication(); } else { - Snapshot s = fileDiffList.getSnapshotById(lastSnapshotId); - dsDelta = diskspaceConsumed(s); + int sid = fileDiffList.getSnapshotById(lastSnapshotId); + dsDelta = diskspaceConsumed(sid); } } else { dsDelta = diskspaceConsumed(); @@ -511,7 +516,7 @@ public class INodeFile extends INodeWithAdditionalFields } @Override - public final ContentSummaryComputationContext computeContentSummary( + public final ContentSummaryComputationContext computeContentSummary( final ContentSummaryComputationContext summary) { computeContentSummary4Snapshot(summary.getCounts()); computeContentSummary4Current(summary.getCounts()); @@ -550,23 +555,21 @@ public class INodeFile extends INodeWithAdditionalFields /** The same as computeFileSize(null). 
*/ public final long computeFileSize() { - return computeFileSize(null); + return computeFileSize(CURRENT_STATE_ID); } /** * Compute file size of the current file if the given snapshot is null; * otherwise, get the file size from the given snapshot. */ - public final long computeFileSize(Snapshot snapshot) { + public final long computeFileSize(int snapshotId) { FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature(); - if (snapshot != null && sf != null) { - final FileDiff d = sf.getDiffs().getDiff( - snapshot); + if (snapshotId != CURRENT_STATE_ID && sf != null) { + final FileDiff d = sf.getDiffs().getDiffById(snapshotId); if (d != null) { return d.getFileSize(); } } - return computeFileSize(true, false); } @@ -617,9 +620,10 @@ public class INodeFile extends INodeWithAdditionalFields return computeFileSize(true, true) * getBlockReplication(); } - public final long diskspaceConsumed(Snapshot lastSnapshot) { - if (lastSnapshot != null) { - return computeFileSize(lastSnapshot) * getFileReplication(lastSnapshot); + public final long diskspaceConsumed(int lastSnapshotId) { + if (lastSnapshotId != CURRENT_STATE_ID) { + return computeFileSize(lastSnapshotId) + * getFileReplication(lastSnapshotId); } else { return diskspaceConsumed(); } @@ -648,9 +652,9 @@ public class INodeFile extends INodeWithAdditionalFields @VisibleForTesting @Override public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix, - final Snapshot snapshot) { - super.dumpTreeRecursively(out, prefix, snapshot); - out.print(", fileSize=" + computeFileSize(snapshot)); + final int snapshotId) { + super.dumpTreeRecursively(out, prefix, snapshotId); + out.print(", fileSize=" + computeFileSize(snapshotId)); // only compare the first block out.print(", blocks="); out.print(blocks == null || blocks.length == 0? 
null: blocks[0]); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java index b00bdd7e789..5ffcc21f5bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java @@ -23,7 +23,6 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.server.namenode.Quota.Counts; -import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.util.GSet; import org.apache.hadoop.util.LightWeightGSet; @@ -89,7 +88,8 @@ public class INodeMap { "", "", new FsPermission((short) 0)), 0, 0) { @Override - INode recordModification(Snapshot latest) throws QuotaExceededException { + INode recordModification(int latestSnapshotId) + throws QuotaExceededException { return null; } @@ -112,7 +112,7 @@ public class INodeMap { } @Override - public Counts cleanSubtree(Snapshot snapshot, Snapshot prior, + public Counts cleanSubtree(int snapshotId, int priorSnapshotId, BlocksMapUpdateInfo collectedBlocks, List removedINodes, boolean countDiffChange) throws QuotaExceededException { return null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java index 6fdf574ebac..a1e3c116a78 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java @@ -91,7 +91,7 @@ public abstract class 
INodeReference extends INode { * method to identify the snapshot which is the latest snapshot before the * reference node's creation. */ - static Snapshot getPriorSnapshot(INodeReference ref) { + static int getPriorSnapshot(INodeReference ref) { WithCount wc = (WithCount) ref.getReferredINode(); WithName wn = null; if (ref instanceof DstReference) { @@ -111,7 +111,7 @@ public abstract class INodeReference extends INode { } } } - return null; + return Snapshot.NO_SNAPSHOT_ID; } private INode referred; @@ -185,13 +185,13 @@ public abstract class INodeReference extends INode { } @Override - public final PermissionStatus getPermissionStatus(Snapshot snapshot) { - return referred.getPermissionStatus(snapshot); + public final PermissionStatus getPermissionStatus(int snapshotId) { + return referred.getPermissionStatus(snapshotId); } @Override - public final String getUserName(Snapshot snapshot) { - return referred.getUserName(snapshot); + public final String getUserName(int snapshotId) { + return referred.getUserName(snapshotId); } @Override @@ -200,8 +200,8 @@ public abstract class INodeReference extends INode { } @Override - public final String getGroupName(Snapshot snapshot) { - return referred.getGroupName(snapshot); + public final String getGroupName(int snapshotId) { + return referred.getGroupName(snapshotId); } @Override @@ -210,8 +210,8 @@ public abstract class INodeReference extends INode { } @Override - public final FsPermission getFsPermission(Snapshot snapshot) { - return referred.getFsPermission(snapshot); + public final FsPermission getFsPermission(int snapshotId) { + return referred.getFsPermission(snapshotId); } @Override public final short getFsPermissionShort() { @@ -229,14 +229,14 @@ public abstract class INodeReference extends INode { } @Override - public final long getModificationTime(Snapshot snapshot) { - return referred.getModificationTime(snapshot); + public final long getModificationTime(int snapshotId) { + return 
referred.getModificationTime(snapshotId); } @Override - public final INode updateModificationTime(long mtime, Snapshot latest) + public final INode updateModificationTime(long mtime, int latestSnapshotId) throws QuotaExceededException { - return referred.updateModificationTime(mtime, latest); + return referred.updateModificationTime(mtime, latestSnapshotId); } @Override @@ -245,8 +245,8 @@ public abstract class INodeReference extends INode { } @Override - public final long getAccessTime(Snapshot snapshot) { - return referred.getAccessTime(snapshot); + public final long getAccessTime(int snapshotId) { + return referred.getAccessTime(snapshotId); } @Override @@ -255,15 +255,15 @@ public abstract class INodeReference extends INode { } @Override - final INode recordModification(Snapshot latest) + final INode recordModification(int latestSnapshotId) throws QuotaExceededException { - referred.recordModification(latest); + referred.recordModification(latestSnapshotId); // reference is never replaced return this; } @Override // used by WithCount - public Quota.Counts cleanSubtree(Snapshot snapshot, Snapshot prior, + public Quota.Counts cleanSubtree(int snapshot, int prior, BlocksMapUpdateInfo collectedBlocks, final List removedINodes, final boolean countDiffChange) throws QuotaExceededException { return referred.cleanSubtree(snapshot, prior, collectedBlocks, @@ -291,8 +291,8 @@ public abstract class INodeReference extends INode { } @Override - public final INodeAttributes getSnapshotINode(Snapshot snapshot) { - return referred.getSnapshotINode(snapshot); + public final INodeAttributes getSnapshotINode(int snapshotId) { + return referred.getSnapshotINode(snapshotId); } @Override @@ -308,7 +308,7 @@ public abstract class INodeReference extends INode { @Override public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix, - final Snapshot snapshot) { + final int snapshot) { super.dumpTreeRecursively(out, prefix, snapshot); if (this instanceof DstReference) { 
out.print(", dstSnapshotId=" + ((DstReference) this).dstSnapshotId); @@ -327,7 +327,7 @@ public abstract class INodeReference extends INode { } public int getDstSnapshotId() { - return Snapshot.INVALID_ID; + return Snapshot.CURRENT_STATE_ID; } /** An anonymous reference with reference count. */ @@ -457,34 +457,35 @@ public abstract class INodeReference extends INode { // node happened before the rename of its ancestor. This should be // impossible since for WithName node we only count its children at the // time of the rename. - Preconditions.checkState(this.lastSnapshotId >= lastSnapshotId); + Preconditions.checkState(lastSnapshotId == Snapshot.CURRENT_STATE_ID + || this.lastSnapshotId >= lastSnapshotId); final INode referred = this.getReferredINode().asReference() .getReferredINode(); // We will continue the quota usage computation using the same snapshot id // as time line (if the given snapshot id is valid). Also, we cannot use // cache for the referred node since its cached quota may have already // been updated by changes in the current tree. - int id = lastSnapshotId > Snapshot.INVALID_ID ? + int id = lastSnapshotId != Snapshot.CURRENT_STATE_ID ? 
lastSnapshotId : this.lastSnapshotId; return referred.computeQuotaUsage(counts, false, id); } @Override - public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior, + public Quota.Counts cleanSubtree(final int snapshot, int prior, final BlocksMapUpdateInfo collectedBlocks, final List removedINodes, final boolean countDiffChange) throws QuotaExceededException { // since WithName node resides in deleted list acting as a snapshot copy, // the parameter snapshot must be non-null - Preconditions.checkArgument(snapshot != null); - // if prior is null, we need to check snapshot belonging to the previous - // WithName instance - if (prior == null) { + Preconditions.checkArgument(snapshot != Snapshot.CURRENT_STATE_ID); + // if prior is NO_SNAPSHOT_ID, we need to check snapshot belonging to the + // previous WithName instance + if (prior == Snapshot.NO_SNAPSHOT_ID) { prior = getPriorSnapshot(this); } - if (prior != null - && Snapshot.ID_COMPARATOR.compare(snapshot, prior) <= 0) { + if (prior != Snapshot.NO_SNAPSHOT_ID + && Snapshot.ID_INTEGER_COMPARATOR.compare(snapshot, prior) <= 0) { return Quota.Counts.newInstance(); } @@ -496,7 +497,7 @@ public abstract class INodeReference extends INode { -counts.get(Quota.DISKSPACE), true); } - if (snapshot.getId() < lastSnapshotId) { + if (snapshot < lastSnapshotId) { // for a WithName node, when we compute its quota usage, we only count // in all the nodes existing at the time of the corresponding rename op. 
// Thus if we are deleting a snapshot before/at the snapshot associated @@ -509,16 +510,16 @@ public abstract class INodeReference extends INode { @Override public void destroyAndCollectBlocks(BlocksMapUpdateInfo collectedBlocks, final List removedINodes) { - Snapshot snapshot = getSelfSnapshot(); + int snapshot = getSelfSnapshot(); if (removeReference(this) <= 0) { getReferredINode().destroyAndCollectBlocks(collectedBlocks, removedINodes); } else { - Snapshot prior = getPriorSnapshot(this); + int prior = getPriorSnapshot(this); INode referred = getReferredINode().asReference().getReferredINode(); - if (snapshot != null) { - if (prior != null && snapshot.getId() <= prior.getId()) { + if (snapshot != Snapshot.NO_SNAPSHOT_ID) { + if (prior != Snapshot.NO_SNAPSHOT_ID && snapshot <= prior) { // the snapshot to be deleted has been deleted while traversing // the src tree of the previous rename operation. This usually // happens when rename's src and dst are under the same @@ -545,9 +546,9 @@ } } - private Snapshot getSelfSnapshot() { + private int getSelfSnapshot() { INode referred = getReferredINode().asReference().getReferredINode(); - Snapshot snapshot = null; + int snapshot = Snapshot.NO_SNAPSHOT_ID; if (referred.isFile() && referred.asFile().isWithSnapshot()) { snapshot = referred.asFile().getDiffs().getPrior(lastSnapshotId); } else if (referred.isDirectory()) { @@ -569,7 +570,7 @@ * latest snapshot. Otherwise changes will be recorded to the snapshot * belonging to the src of the rename. * - * {@link Snapshot#INVALID_ID} means no dstSnapshot (e.g., src of the + * {@link Snapshot#CURRENT_STATE_ID} means no dstSnapshot (e.g., src of the * first-time rename).
*/ private final int dstSnapshotId; @@ -587,25 +588,27 @@ public abstract class INodeReference extends INode { } @Override - public Quota.Counts cleanSubtree(Snapshot snapshot, Snapshot prior, + public Quota.Counts cleanSubtree(int snapshot, int prior, BlocksMapUpdateInfo collectedBlocks, List removedINodes, final boolean countDiffChange) throws QuotaExceededException { - if (snapshot == null && prior == null) { + if (snapshot == Snapshot.CURRENT_STATE_ID + && prior == Snapshot.NO_SNAPSHOT_ID) { Quota.Counts counts = Quota.Counts.newInstance(); this.computeQuotaUsage(counts, true); destroyAndCollectBlocks(collectedBlocks, removedINodes); return counts; } else { - // if prior is null, we need to check snapshot belonging to the previous - // WithName instance - if (prior == null) { + // if prior is NO_SNAPSHOT_ID, we need to check snapshot belonging to + // the previous WithName instance + if (prior == Snapshot.NO_SNAPSHOT_ID) { prior = getPriorSnapshot(this); } - // if prior is not null, and prior is not before the to-be-deleted - // snapshot, we can quit here and leave the snapshot deletion work to - // the src tree of rename - if (snapshot != null && prior != null - && Snapshot.ID_COMPARATOR.compare(snapshot, prior) <= 0) { + // if prior is not NO_SNAPSHOT_ID, and prior is not before the + // to-be-deleted snapshot, we can quit here and leave the snapshot + // deletion work to the src tree of rename + if (snapshot != Snapshot.CURRENT_STATE_ID + && prior != Snapshot.NO_SNAPSHOT_ID + && Snapshot.ID_INTEGER_COMPARATOR.compare(snapshot, prior) <= 0) { return Quota.Counts.newInstance(); } return getReferredINode().cleanSubtree(snapshot, prior, @@ -632,12 +635,12 @@ public abstract class INodeReference extends INode { } else { // we will clean everything, including files, directories, and // snapshots, that were created after this prior snapshot - Snapshot prior = getPriorSnapshot(this); + int prior = getPriorSnapshot(this); // prior must be non-null, otherwise we do 
not have any previous // WithName nodes, and the reference number will be 0. - Preconditions.checkState(prior != null); + Preconditions.checkState(prior != Snapshot.NO_SNAPSHOT_ID); // identify the snapshot created after prior - Snapshot snapshot = getSelfSnapshot(prior); + int snapshot = getSelfSnapshot(prior); INode referred = getReferredINode().asReference().getReferredINode(); if (referred.isFile()) { @@ -671,23 +674,23 @@ public abstract class INodeReference extends INode { } } - private Snapshot getSelfSnapshot(final Snapshot prior) { + private int getSelfSnapshot(final int prior) { WithCount wc = (WithCount) getReferredINode().asReference(); INode referred = wc.getReferredINode(); - Snapshot lastSnapshot = null; + int lastSnapshot = Snapshot.CURRENT_STATE_ID; if (referred.isFile() && referred.asFile().isWithSnapshot()) { - lastSnapshot = referred.asFile().getDiffs().getLastSnapshot(); + lastSnapshot = referred.asFile().getDiffs().getLastSnapshotId(); } else if (referred.isDirectory()) { DirectoryWithSnapshotFeature sf = referred.asDirectory() .getDirectoryWithSnapshotFeature(); if (sf != null) { - lastSnapshot = sf.getLastSnapshot(); + lastSnapshot = sf.getLastSnapshotId(); } } - if (lastSnapshot != null && !lastSnapshot.equals(prior)) { + if (lastSnapshot != Snapshot.CURRENT_STATE_ID && lastSnapshot != prior) { return lastSnapshot; } else { - return null; + return Snapshot.CURRENT_STATE_ID; } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java index 69570e3bd3a..16d73268934 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java @@ -45,10 +45,10 @@ public class INodeSymlink extends INodeWithAdditionalFields { } @Override 
- INode recordModification(Snapshot latest) throws QuotaExceededException { - if (isInLatestSnapshot(latest)) { + INode recordModification(int latestSnapshotId) throws QuotaExceededException { + if (isInLatestSnapshot(latestSnapshotId)) { INodeDirectory parent = getParent(); - parent.saveChild2Snapshot(this, latest, new INodeSymlink(this)); + parent.saveChild2Snapshot(this, latestSnapshotId, new INodeSymlink(this)); } return this; } @@ -74,10 +74,11 @@ public class INodeSymlink extends INodeWithAdditionalFields { } @Override - public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior, + public Quota.Counts cleanSubtree(final int snapshotId, int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks, final List removedINodes, final boolean countDiffChange) { - if (snapshot == null && prior == null) { + if (snapshotId == Snapshot.CURRENT_STATE_ID + && priorSnapshotId == Snapshot.NO_SNAPSHOT_ID) { destroyAndCollectBlocks(collectedBlocks, removedINodes); } return Quota.Counts.newInstance(1, 0); @@ -105,7 +106,7 @@ public class INodeSymlink extends INodeWithAdditionalFields { @Override public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix, - final Snapshot snapshot) { + final int snapshot) { super.dumpTreeRecursively(out, prefix, snapshot); out.println(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java index 4b311c20336..d9fdc41a84e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java @@ -154,9 +154,9 @@ public abstract class INodeWithAdditionalFields extends INode } @Override - final PermissionStatus getPermissionStatus(Snapshot snapshot) { - 
return new PermissionStatus(getUserName(snapshot), getGroupName(snapshot), - getFsPermission(snapshot)); + final PermissionStatus getPermissionStatus(int snapshotId) { + return new PermissionStatus(getUserName(snapshotId), getGroupName(snapshotId), + getFsPermission(snapshotId)); } private final void updatePermissionStatus(PermissionStatusFormat f, long n) { @@ -164,9 +164,9 @@ public abstract class INodeWithAdditionalFields extends INode } @Override - final String getUserName(Snapshot snapshot) { - if (snapshot != null) { - return getSnapshotINode(snapshot).getUserName(); + final String getUserName(int snapshotId) { + if (snapshotId != Snapshot.CURRENT_STATE_ID) { + return getSnapshotINode(snapshotId).getUserName(); } int n = (int)PermissionStatusFormat.USER.retrieve(permission); @@ -180,9 +180,9 @@ public abstract class INodeWithAdditionalFields extends INode } @Override - final String getGroupName(Snapshot snapshot) { - if (snapshot != null) { - return getSnapshotINode(snapshot).getGroupName(); + final String getGroupName(int snapshotId) { + if (snapshotId != Snapshot.CURRENT_STATE_ID) { + return getSnapshotINode(snapshotId).getGroupName(); } int n = (int)PermissionStatusFormat.GROUP.retrieve(permission); @@ -196,9 +196,9 @@ public abstract class INodeWithAdditionalFields extends INode } @Override - final FsPermission getFsPermission(Snapshot snapshot) { - if (snapshot != null) { - return getSnapshotINode(snapshot).getFsPermission(); + final FsPermission getFsPermission(int snapshotId) { + if (snapshotId != Snapshot.CURRENT_STATE_ID) { + return getSnapshotINode(snapshotId).getFsPermission(); } return new FsPermission(getFsPermissionShort()); @@ -220,9 +220,9 @@ public abstract class INodeWithAdditionalFields extends INode } @Override - final long getModificationTime(Snapshot snapshot) { - if (snapshot != null) { - return getSnapshotINode(snapshot).getModificationTime(); + final long getModificationTime(int snapshotId) { + if (snapshotId != 
Snapshot.CURRENT_STATE_ID) { + return getSnapshotINode(snapshotId).getModificationTime(); } return this.modificationTime; @@ -231,13 +231,13 @@ public abstract class INodeWithAdditionalFields extends INode /** Update modification time if it is larger than the current value. */ @Override - public final INode updateModificationTime(long mtime, Snapshot latest) + public final INode updateModificationTime(long mtime, int latestSnapshotId) throws QuotaExceededException { Preconditions.checkState(isDirectory()); if (mtime <= modificationTime) { return this; } - return setModificationTime(mtime, latest); + return setModificationTime(mtime, latestSnapshotId); } final void cloneModificationTime(INodeWithAdditionalFields that) { @@ -250,11 +250,10 @@ public abstract class INodeWithAdditionalFields extends INode } @Override - final long getAccessTime(Snapshot snapshot) { - if (snapshot != null) { - return getSnapshotINode(snapshot).getAccessTime(); + final long getAccessTime(int snapshotId) { + if (snapshotId != Snapshot.CURRENT_STATE_ID) { + return getSnapshotINode(snapshotId).getAccessTime(); } - return accessTime; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java index fee7a574c5f..8e103f81200 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java @@ -135,8 +135,8 @@ public class INodesInPath { if (!isRef && isDir && dir.isWithSnapshot()) { //if the path is a non-snapshot path, update the latest snapshot. 
if (!existing.isSnapshot()) { - existing.updateLatestSnapshot(dir.getDirectoryWithSnapshotFeature() - .getLastSnapshot()); + existing.updateLatestSnapshotId(dir.getDirectoryWithSnapshotFeature() + .getLastSnapshotId()); } } else if (isRef && isDir && !lastComp) { // If the curNode is a reference node, need to check its dstSnapshot: @@ -151,16 +151,17 @@ public class INodesInPath { // recordModification method. if (!existing.isSnapshot()) { int dstSnapshotId = curNode.asReference().getDstSnapshotId(); - Snapshot latest = existing.getLatestSnapshot(); - if (latest == null || // no snapshot in dst tree of rename - dstSnapshotId >= latest.getId()) { // the above scenario - Snapshot lastSnapshot = null; + int latest = existing.getLatestSnapshotId(); + if (latest == Snapshot.CURRENT_STATE_ID || // no snapshot in dst tree of rename + (dstSnapshotId != Snapshot.CURRENT_STATE_ID && + dstSnapshotId >= latest)) { // the above scenario + int lastSnapshot = Snapshot.CURRENT_STATE_ID; DirectoryWithSnapshotFeature sf = null; if (curNode.isDirectory() && (sf = curNode.asDirectory().getDirectoryWithSnapshotFeature()) != null) { - lastSnapshot = sf.getLastSnapshot(); + lastSnapshot = sf.getLastSnapshotId(); } - existing.setSnapshot(lastSnapshot); + existing.setSnapshotId(lastSnapshot); } } } @@ -206,14 +207,14 @@ public class INodesInPath { curNode = null; } else { curNode = s.getRoot(); - existing.setSnapshot(s); + existing.setSnapshotId(s.getId()); } if (index >= -1) { existing.snapshotRootIndex = existing.numNonNull; } } else { // normal case, and also for resolving file/dir under snapshot root - curNode = dir.getChild(childName, existing.getPathSnapshot()); + curNode = dir.getChild(childName, existing.getPathSnapshotId()); } count++; index++; @@ -245,11 +246,12 @@ public class INodesInPath { */ private int snapshotRootIndex; /** - * For snapshot paths, it is the reference to the snapshot; or null if the - * snapshot does not exist. 
For non-snapshot paths, it is the reference to - * the latest snapshot found in the path; or null if no snapshot is found. + * For snapshot paths, it is the id of the snapshot; or + * {@link Snapshot#CURRENT_STATE_ID} if the snapshot does not exist. For + * non-snapshot paths, it is the id of the latest snapshot found in the path; + * or {@link Snapshot#CURRENT_STATE_ID} if no snapshot is found. */ - private Snapshot snapshot = null; + private int snapshotId = Snapshot.CURRENT_STATE_ID; private INodesInPath(byte[][] path, int number) { this.path = path; @@ -262,29 +264,30 @@ public class INodesInPath { } /** - * For non-snapshot paths, return the latest snapshot found in the path. - * For snapshot paths, return null. + * For non-snapshot paths, return the latest snapshot id found in the path. */ - public Snapshot getLatestSnapshot() { - return isSnapshot? null: snapshot; + public int getLatestSnapshotId() { + Preconditions.checkState(!isSnapshot); + return snapshotId; } /** - * For snapshot paths, return the snapshot specified in the path. - * For non-snapshot paths, return null. + * For snapshot paths, return the id of the snapshot specified in the path. + * For non-snapshot paths, return {@link Snapshot#CURRENT_STATE_ID}. */ - public Snapshot getPathSnapshot() { - return isSnapshot? snapshot: null; + public int getPathSnapshotId() { + return isSnapshot ? 
snapshotId : Snapshot.CURRENT_STATE_ID; } - private void setSnapshot(Snapshot s) { - snapshot = s; + private void setSnapshotId(int sid) { + snapshotId = sid; } - private void updateLatestSnapshot(Snapshot s) { - if (snapshot == null - || (s != null && Snapshot.ID_COMPARATOR.compare(snapshot, s) < 0)) { - snapshot = s; + private void updateLatestSnapshotId(int sid) { + if (snapshotId == Snapshot.CURRENT_STATE_ID + || (sid != Snapshot.CURRENT_STATE_ID && Snapshot.ID_INTEGER_COMPARATOR + .compare(snapshotId, sid) < 0)) { + snapshotId = sid; } } @@ -386,7 +389,7 @@ public class INodesInPath { .append("\n capacity = ").append(capacity) .append("\n isSnapshot = ").append(isSnapshot) .append("\n snapshotRootIndex = ").append(snapshotRootIndex) - .append("\n snapshot = ").append(snapshot); + .append("\n snapshotId = ").append(snapshotId); return b.toString(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java index d320b3a68ab..75096a90dc9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java @@ -22,8 +22,8 @@ import java.io.IOException; import java.util.List; import org.apache.hadoop.hdfs.server.namenode.INode; -import org.apache.hadoop.hdfs.server.namenode.INodeAttributes; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; +import org.apache.hadoop.hdfs.server.namenode.INodeAttributes; import org.apache.hadoop.hdfs.server.namenode.Quota; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap; @@ -52,8 +52,8 @@ abstract class AbstractINodeDiff> implements Comparable { - /** The snapshot will be obtained after this diff is 
applied. */ - Snapshot snapshot; + /** The id of the corresponding snapshot. */ + private int snapshotId; /** The snapshot inode data. It is null when there is no change. */ A snapshotINode; /** @@ -64,10 +64,8 @@ abstract class AbstractINodeDiff removedINodes, boolean countDiffChange) throws QuotaExceededException { - int snapshotIndex = Collections.binarySearch(diffs, snapshot.getId()); + int snapshotIndex = Collections.binarySearch(diffs, snapshot); Quota.Counts counts = Quota.Counts.newInstance(); D removed = null; if (snapshotIndex == 0) { - if (prior != null) { + if (prior != Snapshot.NO_SNAPSHOT_ID) { // there is still snapshot before // set the snapshot to latestBefore - diffs.get(snapshotIndex).setSnapshot(prior); - } else { + diffs.get(snapshotIndex).setSnapshotId(prior); + } else { // there is no snapshot before removed = diffs.remove(0); if (countDiffChange) { counts.add(Quota.NAMESPACE, 1); @@ -96,8 +96,8 @@ abstract class AbstractINodeDiffList 0) { final AbstractINodeDiff previous = diffs.get(snapshotIndex - 1); - if (!previous.getSnapshot().equals(prior)) { - diffs.get(snapshotIndex).setSnapshot(prior); + if (previous.getSnapshotId() != prior) { + diffs.get(snapshotIndex).setSnapshotId(prior); } else { // combine the to-be-removed diff with its previous diff removed = diffs.remove(snapshotIndex); @@ -120,10 +120,10 @@ abstract class AbstractINodeDiffList last = getLast(); - return last == null? null: last.getSnapshot(); + return last == null ? Snapshot.CURRENT_STATE_ID : last.getSnapshotId(); } /** @@ -161,60 +161,49 @@ abstract class AbstractINodeDiffList 0 ? 
i - 1 : -i - 2; - return diffs.get(priorIndex).getSnapshot(); + return diffs.get(priorIndex).getSnapshotId(); } } else { // the one, or the one before if not existing if (i >= 0) { - return diffs.get(i).getSnapshot(); + return diffs.get(i).getSnapshotId(); } else if (i < -1) { - return diffs.get(-i - 2).getSnapshot(); + return diffs.get(-i - 2).getSnapshotId(); } else { // i == -1 - return null; + return Snapshot.NO_SNAPSHOT_ID; } } } - public final Snapshot getPrior(int snapshotId) { + public final int getPrior(int snapshotId) { return getPrior(snapshotId, false); } /** * Update the prior snapshot. */ - final Snapshot updatePrior(Snapshot snapshot, Snapshot prior) { - int id = snapshot == null ? Snapshot.INVALID_ID : snapshot.getId(); - Snapshot s = getPrior(id, true); - if (s != null && - (prior == null || Snapshot.ID_COMPARATOR.compare(s, prior) > 0)) { - return s; + final int updatePrior(int snapshot, int prior) { + int p = getPrior(snapshot, true); + if (p != Snapshot.CURRENT_STATE_ID + && Snapshot.ID_INTEGER_COMPARATOR.compare(p, prior) > 0) { + return p; } return prior; } - - /** - * @return the diff corresponding to the given snapshot. - * When the diff is null, it means that the current state and - * the corresponding snapshot state are the same. - */ - public final D getDiff(Snapshot snapshot) { - return getDiffById(snapshot == null ? 
- Snapshot.INVALID_ID : snapshot.getId()); - } - private final D getDiffById(final int snapshotId) { - if (snapshotId == Snapshot.INVALID_ID) { + public final D getDiffById(final int snapshotId) { + if (snapshotId == Snapshot.CURRENT_STATE_ID) { return null; } final int i = Collections.binarySearch(diffs, snapshotId); @@ -234,9 +223,9 @@ abstract class AbstractINodeDiffList= 0) { + && Snapshot.ID_INTEGER_COMPARATOR.compare(last.getSnapshotId(), + latestSnapshotId) >= 0) { return last; } else { try { - return addDiff(latest, currentINode); + return addDiff(latestSnapshotId, currentINode); } catch(NSQuotaExceededException e) { e.setMessagePrefix("Failed to record modification for snapshot"); throw e; @@ -298,10 +288,10 @@ abstract class AbstractINodeDiffList createdList, List deletedList) { - super(snapshot, snapshotINode, posteriorDiff); + DirectoryDiff(int snapshotId, INodeDirectoryAttributes snapshotINode, + DirectoryDiff posteriorDiff, int childrenSize, List createdList, + List deletedList, boolean isSnapshotRoot) { + super(snapshotId, snapshotINode, posteriorDiff); this.childrenSize = childrenSize; this.diff = new ChildrenDiff(createdList, deletedList); + this.isSnapshotRoot = isSnapshotRoot; } ChildrenDiff getChildrenDiff() { return diff; } - - /** Is the inode the root of the snapshot? 
*/ + + void setSnapshotRoot(INodeDirectoryAttributes root) { + this.snapshotINode = root; + this.isSnapshotRoot = true; + } + boolean isSnapshotRoot() { - return snapshotINode == snapshot.getRoot(); + return isSnapshotRoot; } @Override @@ -287,7 +293,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature { combined.combinePosterior(d.diff, null); } children = combined.apply2Current(ReadOnlyList.Util.asList( - currentDir.getChildrenList(null))); + currentDir.getChildrenList(Snapshot.CURRENT_STATE_ID))); } return children; } @@ -327,7 +333,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature { return null; } else if (d.getPosterior() == null) { // no more posterior diff, get from current inode. - return currentDir.getChild(name, null); + return currentDir.getChild(name, Snapshot.CURRENT_STATE_ID); } } } @@ -342,11 +348,9 @@ public class DirectoryWithSnapshotFeature implements INode.Feature { writeSnapshot(out); out.writeInt(childrenSize); - // write snapshotINode - if (isSnapshotRoot()) { - out.writeBoolean(true); - } else { - out.writeBoolean(false); + // Write snapshotINode + out.writeBoolean(isSnapshotRoot); + if (!isSnapshotRoot) { if (snapshotINode != null) { out.writeBoolean(true); FSImageSerialization.writeINodeDirectoryAttributes(snapshotINode, out); @@ -373,7 +377,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature { extends AbstractINodeDiffList { @Override - DirectoryDiff createDiff(Snapshot snapshot, INodeDirectory currentDir) { + DirectoryDiff createDiff(int snapshot, INodeDirectory currentDir) { return new DirectoryDiff(snapshot, currentDir); } @@ -424,12 +428,13 @@ public class DirectoryWithSnapshotFeature implements INode.Feature { /** * Destroy a subtree under a DstReference node. 
*/ - public static void destroyDstSubtree(INode inode, final Snapshot snapshot, - final Snapshot prior, final BlocksMapUpdateInfo collectedBlocks, + public static void destroyDstSubtree(INode inode, final int snapshot, + final int prior, final BlocksMapUpdateInfo collectedBlocks, final List removedINodes) throws QuotaExceededException { - Preconditions.checkArgument(prior != null); + Preconditions.checkArgument(prior != Snapshot.NO_SNAPSHOT_ID); if (inode.isReference()) { - if (inode instanceof INodeReference.WithName && snapshot != null) { + if (inode instanceof INodeReference.WithName + && snapshot != Snapshot.CURRENT_STATE_ID) { // this inode has been renamed before the deletion of the DstReference // subtree inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes, @@ -447,18 +452,18 @@ public class DirectoryWithSnapshotFeature implements INode.Feature { DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature(); if (sf != null) { DirectoryDiffList diffList = sf.getDiffs(); - DirectoryDiff priorDiff = diffList.getDiff(prior); - if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) { + DirectoryDiff priorDiff = diffList.getDiffById(prior); + if (priorDiff != null && priorDiff.getSnapshotId() == prior) { List dList = priorDiff.diff.getList(ListType.DELETED); excludedNodes = cloneDiffList(dList); } - if (snapshot != null) { + if (snapshot != Snapshot.CURRENT_STATE_ID) { diffList.deleteSnapshotDiff(snapshot, prior, dir, collectedBlocks, removedINodes, true); } - priorDiff = diffList.getDiff(prior); - if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) { + priorDiff = diffList.getDiffById(prior); + if (priorDiff != null && priorDiff.getSnapshotId() == prior) { priorDiff.diff.destroyCreatedList(dir, collectedBlocks, removedINodes); } @@ -478,14 +483,14 @@ public class DirectoryWithSnapshotFeature implements INode.Feature { * deleted list of prior. * @param inode The inode to clean. * @param post The post snapshot. 
- * @param prior The prior snapshot. + * @param prior The id of the prior snapshot. * @param collectedBlocks Used to collect blocks for later deletion. * @return Quota usage update. */ private static Quota.Counts cleanDeletedINode(INode inode, - final Snapshot post, final Snapshot prior, + final int post, final int prior, final BlocksMapUpdateInfo collectedBlocks, - final List removedINodes, final boolean countDiffChange) + final List removedINodes, final boolean countDiffChange) throws QuotaExceededException { Quota.Counts counts = Quota.Counts.newInstance(); Deque queue = new ArrayDeque(); @@ -494,7 +499,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature { INode topNode = queue.pollFirst(); if (topNode instanceof INodeReference.WithName) { INodeReference.WithName wn = (INodeReference.WithName) topNode; - if (wn.getLastSnapshotId() >= post.getId()) { + if (wn.getLastSnapshotId() >= post) { wn.cleanSubtree(post, prior, collectedBlocks, removedINodes, countDiffChange); } @@ -511,8 +516,8 @@ public class DirectoryWithSnapshotFeature implements INode.Feature { if (sf != null) { // delete files/dirs created after prior. Note that these // files/dirs, along with inode, were deleted right after post. - DirectoryDiff priorDiff = sf.getDiffs().getDiff(prior); - if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) { + DirectoryDiff priorDiff = sf.getDiffs().getDiffById(prior); + if (priorDiff != null && priorDiff.getSnapshotId() == prior) { priorChildrenDiff = priorDiff.getChildrenDiff(); counts.add(priorChildrenDiff.destroyCreatedList(dir, collectedBlocks, removedINodes)); @@ -540,8 +545,8 @@ public class DirectoryWithSnapshotFeature implements INode.Feature { } /** @return the last snapshot. */ - public Snapshot getLastSnapshot() { - return diffs.getLastSnapshot(); + public int getLastSnapshotId() { + return diffs.getLastSnapshotId(); } /** @return the snapshot diff list. 
*/ @@ -565,11 +570,13 @@ public class DirectoryWithSnapshotFeature implements INode.Feature { * to make sure that parent is in the given snapshot "latest". */ public boolean addChild(INodeDirectory parent, INode inode, - boolean setModTime, Snapshot latest) throws QuotaExceededException { - ChildrenDiff diff = diffs.checkAndAddLatestSnapshotDiff(latest, parent).diff; + boolean setModTime, int latestSnapshotId) throws QuotaExceededException { + ChildrenDiff diff = diffs.checkAndAddLatestSnapshotDiff(latestSnapshotId, + parent).diff; int undoInfo = diff.create(inode); - final boolean added = parent.addChild(inode, setModTime, null); + final boolean added = parent.addChild(inode, setModTime, + Snapshot.CURRENT_STATE_ID); if (!added) { diff.undoCreate(inode, undoInfo); } @@ -581,7 +588,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature { * needs to make sure that parent is in the given snapshot "latest". */ public boolean removeChild(INodeDirectory parent, INode child, - Snapshot latest) throws QuotaExceededException { + int latestSnapshotId) throws QuotaExceededException { // For a directory that is not a renamed node, if isInLatestSnapshot returns // false, the directory is not in the latest snapshot, thus we do not need // to record the removed child in any snapshot. @@ -593,7 +600,8 @@ public class DirectoryWithSnapshotFeature implements INode.Feature { // directory node cannot be in any snapshot (not in current tree, nor in // previous src tree). Thus we do not need to record the removed child in // any snapshot. - ChildrenDiff diff = diffs.checkAndAddLatestSnapshotDiff(latest, parent).diff; + ChildrenDiff diff = diffs.checkAndAddLatestSnapshotDiff(latestSnapshotId, + parent).diff; UndoInfo undoInfo = diff.delete(child); final boolean removed = parent.removeChild(child); @@ -611,29 +619,29 @@ public class DirectoryWithSnapshotFeature implements INode.Feature { * for the snapshot and return it. 
*/ public ReadOnlyList getChildrenList(INodeDirectory currentINode, - final Snapshot snapshot) { - final DirectoryDiff diff = diffs.getDiff(snapshot); + final int snapshotId) { + final DirectoryDiff diff = diffs.getDiffById(snapshotId); return diff != null ? diff.getChildrenList(currentINode) : currentINode - .getChildrenList(null); + .getChildrenList(Snapshot.CURRENT_STATE_ID); } public INode getChild(INodeDirectory currentINode, byte[] name, - Snapshot snapshot) { - final DirectoryDiff diff = diffs.getDiff(snapshot); + int snapshotId) { + final DirectoryDiff diff = diffs.getDiffById(snapshotId); return diff != null ? diff.getChild(name, true, currentINode) - : currentINode.getChild(name, null); + : currentINode.getChild(name, Snapshot.CURRENT_STATE_ID); } /** Used to record the modification of a symlink node */ public INode saveChild2Snapshot(INodeDirectory currentINode, - final INode child, final Snapshot latest, final INode snapshotCopy) + final INode child, final int latestSnapshotId, final INode snapshotCopy) throws QuotaExceededException { Preconditions.checkArgument(!child.isDirectory(), "child is a directory, child=%s", child); - Preconditions.checkArgument(latest != null); + Preconditions.checkArgument(latestSnapshotId != Snapshot.CURRENT_STATE_ID); - final DirectoryDiff diff = diffs.checkAndAddLatestSnapshotDiff(latest, - currentINode); + final DirectoryDiff diff = diffs.checkAndAddLatestSnapshotDiff( + latestSnapshotId, currentINode); if (diff.getChild(child.getLocalNameBytes(), false, currentINode) != null) { // it was already saved in the latest snapshot earlier. 
return child; @@ -656,7 +664,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature { public Quota.Counts computeQuotaUsage4CurrentDirectory(Quota.Counts counts) { for(DirectoryDiff d : diffs) { for(INode deleted : d.getChildrenDiff().getList(ListType.DELETED)) { - deleted.computeQuotaUsage(counts, false, Snapshot.INVALID_ID); + deleted.computeQuotaUsage(counts, false, Snapshot.CURRENT_STATE_ID); } } counts.add(Quota.NAMESPACE, diffs.asList().size()); @@ -744,14 +752,14 @@ public class DirectoryWithSnapshotFeature implements INode.Feature { } public Quota.Counts cleanDirectory(final INodeDirectory currentINode, - final Snapshot snapshot, Snapshot prior, + final int snapshot, int prior, final BlocksMapUpdateInfo collectedBlocks, final List removedINodes, final boolean countDiffChange) throws QuotaExceededException { Quota.Counts counts = Quota.Counts.newInstance(); Map priorCreated = null; Map priorDeleted = null; - if (snapshot == null) { // delete the current directory + if (snapshot == Snapshot.CURRENT_STATE_ID) { // delete the current directory currentINode.recordModification(prior); // delete everything in created list DirectoryDiff lastDiff = diffs.getLast(); @@ -764,9 +772,9 @@ public class DirectoryWithSnapshotFeature implements INode.Feature { prior = getDiffs().updatePrior(snapshot, prior); // if there is a snapshot diff associated with prior, we need to record // its original created and deleted list before deleting post - if (prior != null) { - DirectoryDiff priorDiff = this.getDiffs().getDiff(prior); - if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) { + if (prior != Snapshot.NO_SNAPSHOT_ID) { + DirectoryDiff priorDiff = this.getDiffs().getDiffById(prior); + if (priorDiff != null && priorDiff.getSnapshotId() == prior) { List cList = priorDiff.diff.getList(ListType.CREATED); List dList = priorDiff.diff.getList(ListType.DELETED); priorCreated = cloneDiffList(cList); @@ -774,13 +782,13 @@ public class 
DirectoryWithSnapshotFeature implements INode.Feature { } } - counts.add(getDiffs().deleteSnapshotDiff(snapshot, prior, currentINode, - collectedBlocks, removedINodes, countDiffChange)); + counts.add(getDiffs().deleteSnapshotDiff(snapshot, prior, + currentINode, collectedBlocks, removedINodes, countDiffChange)); // check priorDiff again since it may be created during the diff deletion - if (prior != null) { - DirectoryDiff priorDiff = this.getDiffs().getDiff(prior); - if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) { + if (prior != Snapshot.NO_SNAPSHOT_ID) { + DirectoryDiff priorDiff = this.getDiffs().getDiffById(prior); + if (priorDiff != null && priorDiff.getSnapshotId() == prior) { // For files/directories created between "prior" and "snapshot", // we need to clear snapshot copies for "snapshot". Note that we must // use null as prior in the cleanSubtree call. Files/directories that @@ -791,8 +799,8 @@ public class DirectoryWithSnapshotFeature implements INode.Feature { for (INode cNode : priorDiff.getChildrenDiff().getList( ListType.CREATED)) { if (priorCreated.containsKey(cNode)) { - counts.add(cNode.cleanSubtree(snapshot, null, collectedBlocks, - removedINodes, countDiffChange)); + counts.add(cNode.cleanSubtree(snapshot, Snapshot.NO_SNAPSHOT_ID, + collectedBlocks, removedINodes, countDiffChange)); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java index 17ff375f78f..919ab564c66 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java @@ -38,15 +38,15 @@ public class FileDiff extends /** The file size at snapshot creation time. 
*/ private final long fileSize; - FileDiff(Snapshot snapshot, INodeFile file) { - super(snapshot, null, null); + FileDiff(int snapshotId, INodeFile file) { + super(snapshotId, null, null); fileSize = file.computeFileSize(); } /** Constructor used by FSImage loading */ - FileDiff(Snapshot snapshot, INodeFileAttributes snapshotINode, + FileDiff(int snapshotId, INodeFileAttributes snapshotINode, FileDiff posteriorDiff, long fileSize) { - super(snapshot, snapshotINode, posteriorDiff); + super(snapshotId, snapshotINode, posteriorDiff); this.fileSize = fileSize; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java index 54bcffa0031..b0a973d28ca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java @@ -25,8 +25,8 @@ public class FileDiffList extends AbstractINodeDiffList { @Override - FileDiff createDiff(Snapshot snapshot, INodeFile file) { - return new FileDiff(snapshot, file); + FileDiff createDiff(int snapshotId, INodeFile file) { + return new FileDiff(snapshotId, file); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java index 5a611611271..e32f78a4575 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java @@ -78,22 +78,22 @@ public class FileWithSnapshotFeature implements 
INode.Feature { return (isCurrentFileDeleted()? "(DELETED), ": ", ") + diffs; } - public Quota.Counts cleanFile(final INodeFile file, final Snapshot snapshot, - Snapshot prior, final BlocksMapUpdateInfo collectedBlocks, + public Quota.Counts cleanFile(final INodeFile file, final int snapshotId, + int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks, final List removedINodes, final boolean countDiffChange) throws QuotaExceededException { - if (snapshot == null) { + if (snapshotId == Snapshot.CURRENT_STATE_ID) { // delete the current file while the file has snapshot feature if (!isCurrentFileDeleted()) { - file.recordModification(prior); + file.recordModification(priorSnapshotId); deleteCurrentFile(); } collectBlocksAndClear(file, collectedBlocks, removedINodes); return Quota.Counts.newInstance(); } else { // delete the snapshot - prior = getDiffs().updatePrior(snapshot, prior); - return diffs.deleteSnapshotDiff(snapshot, prior, file, collectedBlocks, - removedINodes, countDiffChange); + priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId); + return diffs.deleteSnapshotDiff(snapshotId, priorSnapshotId, file, + collectedBlocks, removedINodes, countDiffChange); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java index a53c094b6c0..7e1863f9538 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java @@ -206,6 +206,15 @@ public class INodeDirectorySnapshottable extends INodeDirectory { return i < 0? 
null: snapshotsByNames.get(i); } + Snapshot getSnapshotById(int sid) { + for (Snapshot s : snapshotsByNames) { + if (s.getId() == sid) { + return s; + } + } + return null; + } + /** @return {@link #snapshotsByNames} as a {@link ReadOnlyList} */ public ReadOnlyList getSnapshotList() { return ReadOnlyList.Util.asReadOnlyList(snapshotsByNames); @@ -297,13 +306,14 @@ public class INodeDirectorySnapshottable extends INodeDirectory { + "snapshot with the same name \"" + Snapshot.getSnapshotName(s) + "\"."); } - final DirectoryDiff d = getDiffs().addDiff(s, this); - d.snapshotINode = s.getRoot(); + final DirectoryDiff d = getDiffs().addDiff(id, this); + d.setSnapshotRoot(s.getRoot()); snapshotsByNames.add(-i - 1, s); //set modification time - updateModificationTime(Time.now(), null); - s.getRoot().setModificationTime(getModificationTime(), null); + updateModificationTime(Time.now(), Snapshot.CURRENT_STATE_ID); + s.getRoot().setModificationTime(getModificationTime(), + Snapshot.CURRENT_STATE_ID); return s; } @@ -326,10 +336,10 @@ public class INodeDirectorySnapshottable extends INodeDirectory { + ": the snapshot does not exist."); } else { final Snapshot snapshot = snapshotsByNames.get(i); - Snapshot prior = Snapshot.findLatestSnapshot(this, snapshot); + int prior = Snapshot.findLatestSnapshot(this, snapshot.getId()); try { - Quota.Counts counts = cleanSubtree(snapshot, prior, collectedBlocks, - removedINodes, true); + Quota.Counts counts = cleanSubtree(snapshot.getId(), prior, + collectedBlocks, removedINodes, true); INodeDirectory parent = getParent(); if (parent != null) { // there will not be any WithName node corresponding to the deleted @@ -425,8 +435,9 @@ public class INodeDirectorySnapshottable extends INodeDirectory { diffReport.addDirDiff(dir, relativePath, diff); } } - ReadOnlyList children = dir.getChildrenList(diffReport - .isFromEarlier() ? diffReport.to : diffReport.from); + ReadOnlyList children = dir.getChildrenList( + diffReport.isFromEarlier() ? 
Snapshot.getSnapshotId(diffReport.to) : + Snapshot.getSnapshotId(diffReport.from)); for (INode child : children) { final byte[] name = child.getLocalNameBytes(); if (diff.searchIndex(ListType.CREATED, name) < 0 @@ -454,16 +465,15 @@ public class INodeDirectorySnapshottable extends INodeDirectory { * Replace itself with {@link INodeDirectoryWithSnapshot} or * {@link INodeDirectory} depending on the latest snapshot. */ - INodeDirectory replaceSelf(final Snapshot latest, final INodeMap inodeMap) + INodeDirectory replaceSelf(final int latestSnapshotId, final INodeMap inodeMap) throws QuotaExceededException { - if (latest == null) { - Preconditions.checkState( - getDirectoryWithSnapshotFeature().getLastSnapshot() == null, - "latest == null but getLastSnapshot() != null, this=%s", this); + if (latestSnapshotId == Snapshot.CURRENT_STATE_ID) { + Preconditions.checkState(getDirectoryWithSnapshotFeature() + .getLastSnapshotId() == Snapshot.CURRENT_STATE_ID, "this=%s", this); } INodeDirectory dir = replaceSelf4INodeDirectory(inodeMap); - if (latest != null) { - dir.recordModification(latest); + if (latestSnapshotId != Snapshot.CURRENT_STATE_ID) { + dir.recordModification(latestSnapshotId); } return dir; } @@ -475,10 +485,10 @@ public class INodeDirectorySnapshottable extends INodeDirectory { @Override public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix, - Snapshot snapshot) { + int snapshot) { super.dumpTreeRecursively(out, prefix, snapshot); - if (snapshot == null) { + if (snapshot == Snapshot.CURRENT_STATE_ID) { out.println(); out.print(prefix); @@ -494,7 +504,8 @@ public class INodeDirectorySnapshottable extends INodeDirectory { n++; } } - Preconditions.checkState(n == snapshotsByNames.size()); + Preconditions.checkState(n == snapshotsByNames.size(), "#n=" + n + + ", snapshotsByNames.size()=" + snapshotsByNames.size()); out.print(", #snapshot="); out.println(n); @@ -522,8 +533,9 @@ public class INodeDirectorySnapshottable extends INodeDirectory { 
@Override public SnapshotAndINode next() { - final Snapshot s = next.snapshot; - final SnapshotAndINode pair = new SnapshotAndINode(s); + final SnapshotAndINode pair = new SnapshotAndINode(next + .getSnapshotId(), getSnapshotById(next.getSnapshotId()) + .getRoot()); next = findNext(); return pair; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java index 549dd65abf1..ba23439abe0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java @@ -37,7 +37,11 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList; /** Snapshot of a sub-tree in the namesystem. */ @InterfaceAudience.Private public class Snapshot implements Comparable { - public static final int INVALID_ID = -1; + /** + * This id is used to indicate the current state (vs. snapshots) + */ + public static final int CURRENT_STATE_ID = Integer.MAX_VALUE - 1; + public static final int NO_SNAPSHOT_ID = -1; /** * The pattern for generating the default snapshot name. @@ -61,14 +65,18 @@ public class Snapshot implements Comparable { .toString(); } - /** - * Get the name of the given snapshot. + /** + * Get the name of the given snapshot. * @param s The given snapshot. * @return The name of the snapshot, or an empty string if {@code s} is null */ static String getSnapshotName(Snapshot s) { return s != null ? s.getRoot().getLocalName() : ""; } + + public static int getSnapshotId(Snapshot s) { + return s == null ? 
CURRENT_STATE_ID : s.getId(); + } /** * Compare snapshot with IDs, where null indicates the current status thus @@ -78,9 +86,8 @@ = new Comparator() { @Override public int compare(Snapshot left, Snapshot right) { - return ID_INTEGER_COMPARATOR.compare( - left == null? null: left.getId(), - right == null? null: right.getId()); + return ID_INTEGER_COMPARATOR.compare(Snapshot.getSnapshotId(left), + Snapshot.getSnapshotId(right)); } }; @@ -92,12 +99,9 @@ = new Comparator() { @Override public int compare(Integer left, Integer right) { - // null means the current state, thus should be the largest - if (left == null) { - return right == null? 0: 1; - } else { - return right == null? -1: left - right; - } + // Snapshot.CURRENT_STATE_ID means the current state, thus should be the + // largest + return left - right; } }; @@ -108,12 +112,12 @@ * is not null). * * @param inode the given inode that the returned snapshot needs to cover - * @param anchor the returned snapshot should be taken before this snapshot. - * @return the latest snapshot covers the given inode and was taken before the - * the given snapshot (if it is not null). + * @param anchor the returned snapshot should be taken before this given id. + * @return id of the latest snapshot that covers the given inode and was taken + * before the given snapshot (if it is not null). 
*/ - public static Snapshot findLatestSnapshot(INode inode, Snapshot anchor) { - Snapshot latest = null; + public static int findLatestSnapshot(INode inode, final int anchor) { + int latest = NO_SNAPSHOT_ID; for(; inode != null; inode = inode.getParent()) { if (inode.isDirectory()) { final INodeDirectory dir = inode.asDirectory(); @@ -139,13 +143,13 @@ public class Snapshot implements Comparable { } @Override - public ReadOnlyList getChildrenList(Snapshot snapshot) { - return getParent().getChildrenList(snapshot); + public ReadOnlyList getChildrenList(int snapshotId) { + return getParent().getChildrenList(snapshotId); } @Override - public INode getChild(byte[] name, Snapshot snapshot) { - return getParent().getChild(name, snapshot); + public INode getChild(byte[] name, int snapshotId) { + return getParent().getChild(name, snapshotId); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java index cd19b1b7b2e..e836cd87959 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java @@ -118,7 +118,7 @@ public class SnapshotFSImageFormat { private static FileDiff loadFileDiff(FileDiff posterior, DataInput in, FSImageFormat.Loader loader) throws IOException { - // 1. Read the full path of the Snapshot root to identify the Snapshot + // 1. Read the id of the Snapshot root to identify the Snapshot final Snapshot snapshot = loader.getSnapshot(in); // 2. Load file size @@ -128,7 +128,7 @@ public class SnapshotFSImageFormat { final INodeFileAttributes snapshotINode = in.readBoolean()? 
loader.loadINodeFileAttributes(in): null; - return new FileDiff(snapshot, snapshotINode, posterior, fileSize); + return new FileDiff(snapshot.getId(), snapshotINode, posterior, fileSize); } /** @@ -149,7 +149,8 @@ public class SnapshotFSImageFormat { } // else go to the next SnapshotDiff } // use the current child - INode currentChild = parent.getChild(createdNodeName, null); + INode currentChild = parent.getChild(createdNodeName, + Snapshot.CURRENT_STATE_ID); if (currentChild == null) { throw new IOException("Cannot find an INode associated with the INode " + DFSUtil.bytes2String(createdNodeName) @@ -295,9 +296,9 @@ public class SnapshotFSImageFormat { // 6. Compose the SnapshotDiff List diffs = parent.getDiffs().asList(); - DirectoryDiff sdiff = new DirectoryDiff(snapshot, snapshotINode, - diffs.isEmpty() ? null : diffs.get(0), - childrenSize, createdList, deletedList); + DirectoryDiff sdiff = new DirectoryDiff(snapshot.getId(), snapshotINode, + diffs.isEmpty() ? null : diffs.get(0), childrenSize, createdList, + deletedList, snapshotINode == snapshot.getRoot()); return sdiff; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java index cc8b0568aed..8fa0f0c932b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java @@ -114,7 +114,7 @@ public class SnapshotManager implements SnapshotStats { s = (INodeDirectorySnapshottable)d; s.setSnapshotQuota(INodeDirectorySnapshottable.SNAPSHOT_LIMIT); } else { - s = d.replaceSelf4INodeDirectorySnapshottable(iip.getLatestSnapshot(), + s = d.replaceSelf4INodeDirectorySnapshottable(iip.getLatestSnapshotId(), fsdir.getINodeMap()); } addSnapshottable(s); 
@@ -160,7 +160,7 @@ public class SnapshotManager implements SnapshotStats { if (s == fsdir.getRoot()) { s.setSnapshotQuota(0); } else { - s.replaceSelf(iip.getLatestSnapshot(), fsdir.getINodeMap()); + s.replaceSelf(iip.getLatestSnapshotId(), fsdir.getINodeMap()); } removeSnapshottable(s); } @@ -324,7 +324,8 @@ public class SnapshotManager implements SnapshotStats { SnapshottableDirectoryStatus status = new SnapshottableDirectoryStatus( dir.getModificationTime(), dir.getAccessTime(), dir.getFsPermission(), dir.getUserName(), dir.getGroupName(), - dir.getLocalNameBytes(), dir.getId(), dir.getChildrenNum(null), + dir.getLocalNameBytes(), dir.getId(), + dir.getChildrenNum(Snapshot.CURRENT_STATE_ID), dir.getNumSnapshots(), dir.getSnapshotQuota(), dir.getParent() == null ? DFSUtil.EMPTY_BYTES : diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java index ba85e06c0b1..94c11be4b69 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java @@ -29,6 +29,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -119,7 +120,7 @@ public class TestFSDirectory { fsdir.reset(); Assert.assertFalse(fsdir.isReady()); final INodeDirectory root = (INodeDirectory) fsdir.getINode("/"); - Assert.assertTrue(root.getChildrenList(null).isEmpty()); + Assert.assertTrue(root.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty()); fsdir.imageLoadComplete(); Assert.assertTrue(fsdir.isReady()); } 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java index 95e3adb6e03..21935d05d9c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java @@ -34,6 +34,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; @@ -43,6 +44,7 @@ import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile; import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable; +import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; import org.apache.hadoop.hdfs.util.Canceler; import org.apache.log4j.Level; @@ -195,11 +197,12 @@ public class TestFSImageWithSnapshot { INodeDirectorySnapshottable rootNode = (INodeDirectorySnapshottable) fsn.dir.getINode4Write(root.toString()); assertTrue("The children list of root should be empty", - rootNode.getChildrenList(null).isEmpty()); + rootNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty()); // one snapshot on root: s1 List diffList = rootNode.getDiffs().asList(); assertEquals(1, diffList.size()); - assertEquals("s1", diffList.get(0).getSnapshot().getRoot().getLocalName()); + Snapshot 
s1 = rootNode.getSnapshot(DFSUtil.string2Bytes("s1")); + assertEquals(s1.getId(), diffList.get(0).getSnapshotId()); // check SnapshotManager's snapshottable directory list assertEquals(1, fsn.getSnapshotManager().getNumSnapshottableDirs()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java index 3b32a397b7b..c8854c69326 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java @@ -60,6 +60,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; +import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.Time; @@ -781,7 +782,7 @@ public class TestINodeFile { } System.out.println("Adding component " + DFSUtil.bytes2String(component)); dir = new INodeDirectory(++id, component, permstatus, 0); - prev.addChild(dir, false, null); + prev.addChild(dir, false, Snapshot.CURRENT_STATE_ID); prev = dir; } return dir; // Last Inode in the chain diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java index b7df5d5c8f6..2a5edbf0746 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java @@ -123,8 +123,12 @@ public class TestSnapshotPathINodes { final Snapshot snapshot, int index) { assertEquals(isSnapshot, inodesInPath.isSnapshot()); assertEquals(index, inodesInPath.getSnapshotRootIndex()); - assertEquals(isSnapshot? snapshot: null, inodesInPath.getPathSnapshot()); - assertEquals(isSnapshot? null: snapshot, inodesInPath.getLatestSnapshot()); + assertEquals(Snapshot.getSnapshotId(isSnapshot ? snapshot : null), + inodesInPath.getPathSnapshotId()); + if (!isSnapshot) { + assertEquals(Snapshot.getSnapshotId(snapshot), + inodesInPath.getLatestSnapshotId()); + } if (isSnapshot && index >= 0) { assertEquals(Snapshot.Root.class, inodesInPath.getINodes()[index].getClass()); } @@ -424,7 +428,7 @@ public class TestSnapshotPathINodes { // The modification time of the snapshot INode should be the same with the // original INode before modification assertEquals(modTime, - snapshotFileNode.getModificationTime(ssNodesInPath.getPathSnapshot())); + snapshotFileNode.getModificationTime(ssNodesInPath.getPathSnapshotId())); // Check the INode for /TestSnapshot/sub1/file1 again names = INode.getPathNames(file1.toString()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java index 146065d39bd..5496d4ffb11 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java @@ -270,7 +270,8 @@ public class SnapshotTestHelper { public static void dumpTree2File(FSDirectory fsdir, File f) throws IOException{ final PrintWriter out = new PrintWriter(new FileWriter(f, false), true); - 
fsdir.getINode("/").dumpTreeRecursively(out, new StringBuilder(), null); + fsdir.getINode("/").dumpTreeRecursively(out, new StringBuilder(), + Snapshot.CURRENT_STATE_ID); out.close(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java index e795906a2a0..1d1082cad5b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java @@ -156,7 +156,6 @@ public class TestINodeFileUnderConstructionWithSnapshot { INodeDirectorySnapshottable dirNode = (INodeDirectorySnapshottable) fsdir .getINode(dir.toString()); DirectoryDiff last = dirNode.getDiffs().getLast(); - Snapshot s0 = last.snapshot; // 2. append without closing stream out = appendFileWithoutClosing(file, BLOCKSIZE); @@ -164,7 +163,7 @@ public class TestINodeFileUnderConstructionWithSnapshot { // re-check nodeInDeleted_S0 dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString()); - assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize(s0)); + assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize(last.getSnapshotId())); // 3. 
take snapshot --> close stream hdfs.createSnapshot(dir, "s1"); @@ -175,9 +174,8 @@ public class TestINodeFileUnderConstructionWithSnapshot { fileNode = (INodeFile) fsdir.getINode(file.toString()); dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString()); last = dirNode.getDiffs().getLast(); - Snapshot s1 = last.snapshot; assertTrue(fileNode.isWithSnapshot()); - assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(s1)); + assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(last.getSnapshotId())); // 4. modify file --> append without closing stream --> take snapshot --> // close stream @@ -187,7 +185,7 @@ public class TestINodeFileUnderConstructionWithSnapshot { out.close(); // re-check the size of nodeInDeleted_S1 - assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(s1)); + assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(last.getSnapshotId())); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java index b46705830b9..7fe8087f2a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java @@ -44,6 +44,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSOutputStream; import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag; @@ -72,6 +73,7 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +; /** Testing rename with 
snapshots. */ public class TestRenameWithSnapshots { @@ -402,9 +404,11 @@ public class TestRenameWithSnapshots { final Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo"); assertFalse(hdfs.exists(foo_s3)); + INodeDirectorySnapshottable sdir2Node = + (INodeDirectorySnapshottable) fsdir.getINode(sdir2.toString()); + Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2")); INodeFile sfoo = fsdir.getINode(newfoo.toString()).asFile(); - assertEquals("s2", sfoo.getDiffs().getLastSnapshot().getRoot() - .getLocalName()); + assertEquals(s2.getId(), sfoo.getDiffs().getLastSnapshotId()); } /** @@ -604,8 +608,10 @@ public class TestRenameWithSnapshots { INodeFile snode = fsdir.getINode(newfoo.toString()).asFile(); assertEquals(1, snode.getDiffs().asList().size()); - assertEquals("s2", snode.getDiffs().getLastSnapshot().getRoot() - .getLocalName()); + INodeDirectorySnapshottable sdir2Node = + (INodeDirectorySnapshottable) fsdir.getINode(sdir2.toString()); + Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2")); + assertEquals(s2.getId(), snode.getDiffs().getLastSnapshotId()); // restart cluster restartClusterAndCheckImage(true); @@ -758,12 +764,14 @@ public class TestRenameWithSnapshots { assertEquals(2, fooWithCount.getReferenceCount()); INodeDirectory foo = fooWithCount.asDirectory(); assertEquals(1, foo.getDiffs().asList().size()); - assertEquals("s1", foo.getDirectoryWithSnapshotFeature().getLastSnapshot() - .getRoot().getLocalName()); + INodeDirectorySnapshottable sdir1Node = + (INodeDirectorySnapshottable) fsdir.getINode(sdir1.toString()); + Snapshot s1 = sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1")); + assertEquals(s1.getId(), foo.getDirectoryWithSnapshotFeature() + .getLastSnapshotId()); INodeFile bar1 = fsdir.getINode4Write(bar1_dir1.toString()).asFile(); assertEquals(1, bar1.getDiffs().asList().size()); - assertEquals("s1", bar1.getDiffs().getLastSnapshot().getRoot() - .getLocalName()); + assertEquals(s1.getId(), 
bar1.getDiffs().getLastSnapshotId()); INodeReference barRef = fsdir.getINode4Write(bar2_dir1.toString()) .asReference(); @@ -772,8 +780,7 @@ public class TestRenameWithSnapshots { assertEquals(2, barWithCount.getReferenceCount()); INodeFile bar = barWithCount.asFile(); assertEquals(1, bar.getDiffs().asList().size()); - assertEquals("s1", bar.getDiffs().getLastSnapshot().getRoot() - .getLocalName()); + assertEquals(s1.getId(), bar.getDiffs().getLastSnapshotId()); // restart the cluster and check fsimage restartClusterAndCheckImage(true); @@ -967,6 +974,13 @@ public class TestRenameWithSnapshots { hdfs.rename(bar_dir2, bar_dir1); // check the internal details + INodeDirectorySnapshottable sdir1Node = + (INodeDirectorySnapshottable) fsdir.getINode(sdir1.toString()); + INodeDirectorySnapshottable sdir2Node = + (INodeDirectorySnapshottable) fsdir.getINode(sdir2.toString()); + INodeDirectorySnapshottable sdir3Node = + (INodeDirectorySnapshottable) fsdir.getINode(sdir3.toString()); + INodeReference fooRef = fsdir.getINode4Write(foo_dir1.toString()) .asReference(); INodeReference.WithCount fooWithCount = (WithCount) fooRef.getReferredINode(); @@ -975,16 +989,22 @@ public class TestRenameWithSnapshots { INodeDirectory foo = fooWithCount.asDirectory(); List fooDiffs = foo.getDiffs().asList(); assertEquals(4, fooDiffs.size()); - assertEquals("s2222", fooDiffs.get(3).snapshot.getRoot().getLocalName()); - assertEquals("s333", fooDiffs.get(2).snapshot.getRoot().getLocalName()); - assertEquals("s22", fooDiffs.get(1).snapshot.getRoot().getLocalName()); - assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName()); + + Snapshot s2222 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2222")); + Snapshot s333 = sdir3Node.getSnapshot(DFSUtil.string2Bytes("s333")); + Snapshot s22 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s22")); + Snapshot s1 = sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1")); + + assertEquals(s2222.getId(), fooDiffs.get(3).getSnapshotId()); + 
assertEquals(s333.getId(), fooDiffs.get(2).getSnapshotId()); + assertEquals(s22.getId(), fooDiffs.get(1).getSnapshotId()); + assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId()); INodeFile bar1 = fsdir.getINode4Write(bar1_dir1.toString()).asFile(); List bar1Diffs = bar1.getDiffs().asList(); assertEquals(3, bar1Diffs.size()); - assertEquals("s333", bar1Diffs.get(2).snapshot.getRoot().getLocalName()); - assertEquals("s22", bar1Diffs.get(1).snapshot.getRoot().getLocalName()); - assertEquals("s1", bar1Diffs.get(0).snapshot.getRoot().getLocalName()); + assertEquals(s333.getId(), bar1Diffs.get(2).getSnapshotId()); + assertEquals(s22.getId(), bar1Diffs.get(1).getSnapshotId()); + assertEquals(s1.getId(), bar1Diffs.get(0).getSnapshotId()); INodeReference barRef = fsdir.getINode4Write(bar_dir1.toString()) .asReference(); @@ -994,10 +1014,10 @@ public class TestRenameWithSnapshots { INodeFile bar = barWithCount.asFile(); List barDiffs = bar.getDiffs().asList(); assertEquals(4, barDiffs.size()); - assertEquals("s2222", barDiffs.get(3).snapshot.getRoot().getLocalName()); - assertEquals("s333", barDiffs.get(2).snapshot.getRoot().getLocalName()); - assertEquals("s22", barDiffs.get(1).snapshot.getRoot().getLocalName()); - assertEquals("s1", barDiffs.get(0).snapshot.getRoot().getLocalName()); + assertEquals(s2222.getId(), barDiffs.get(3).getSnapshotId()); + assertEquals(s333.getId(), barDiffs.get(2).getSnapshotId()); + assertEquals(s22.getId(), barDiffs.get(1).getSnapshotId()); + assertEquals(s1.getId(), barDiffs.get(0).getSnapshotId()); // restart the cluster and check fsimage restartClusterAndCheckImage(true); @@ -1033,10 +1053,10 @@ public class TestRenameWithSnapshots { foo = fooWithCount.asDirectory(); fooDiffs = foo.getDiffs().asList(); assertEquals(4, fooDiffs.size()); - assertEquals("s2222", fooDiffs.get(3).snapshot.getRoot().getLocalName()); + assertEquals(s2222.getId(), fooDiffs.get(3).getSnapshotId()); bar1Diffs = bar1.getDiffs().asList(); assertEquals(3, 
bar1Diffs.size()); - assertEquals("s333", bar1Diffs.get(2).snapshot.getRoot().getLocalName()); + assertEquals(s333.getId(), bar1Diffs.get(2).getSnapshotId()); barRef = fsdir.getINode(bar_s2222.toString()).asReference(); barWithCount = (WithCount) barRef.getReferredINode(); @@ -1044,7 +1064,7 @@ public class TestRenameWithSnapshots { bar = barWithCount.asFile(); barDiffs = bar.getDiffs().asList(); assertEquals(4, barDiffs.size()); - assertEquals("s2222", barDiffs.get(3).snapshot.getRoot().getLocalName()); + assertEquals(s2222.getId(), barDiffs.get(3).getSnapshotId()); } /** @@ -1164,6 +1184,9 @@ public class TestRenameWithSnapshots { assertTrue(hdfs.exists(bar_s2)); // check internal details + INodeDirectorySnapshottable sdir2Node = + (INodeDirectorySnapshottable) fsdir.getINode(sdir2.toString()); + Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2")); final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo"); INodeReference fooRef = fsdir.getINode(foo_s2.toString()).asReference(); assertTrue(fooRef instanceof INodeReference.WithName); @@ -1172,7 +1195,7 @@ public class TestRenameWithSnapshots { INodeDirectory fooDir = fooWC.getReferredINode().asDirectory(); List diffs = fooDir.getDiffs().asList(); assertEquals(1, diffs.size()); - assertEquals("s2", diffs.get(0).snapshot.getRoot().getLocalName()); + assertEquals(s2.getId(), diffs.get(0).getSnapshotId()); // restart the cluster and check fsimage restartClusterAndCheckImage(true); @@ -1260,7 +1283,7 @@ public class TestRenameWithSnapshots { INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory(); INodeDirectory mockDir2 = spy(dir2); doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(), - (Snapshot) anyObject()); + Mockito.anyInt()); INodeDirectory root = fsdir.getINode4Write("/").asDirectory(); root.replaceChild(dir2, mockDir2, fsdir.getINodeMap()); @@ -1271,12 +1294,14 @@ public class TestRenameWithSnapshots { // check the current internal details 
INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir .getINode4Write(sdir1.toString()); - ReadOnlyList dir1Children = dir1Node.getChildrenList(null); + Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1")); + ReadOnlyList dir1Children = dir1Node + .getChildrenList(Snapshot.CURRENT_STATE_ID); assertEquals(1, dir1Children.size()); assertEquals(foo.getName(), dir1Children.get(0).getLocalName()); List dir1Diffs = dir1Node.getDiffs().asList(); assertEquals(1, dir1Diffs.size()); - assertEquals("s1", dir1Diffs.get(0).snapshot.getRoot().getLocalName()); + assertEquals(s1.getId(), dir1Diffs.get(0).getSnapshotId()); // after the undo of rename, both the created and deleted list of sdir1 // should be empty @@ -1288,7 +1313,7 @@ public class TestRenameWithSnapshots { assertTrue(fooNode.isDirectory() && fooNode.asDirectory().isWithSnapshot()); List fooDiffs = fooNode.asDirectory().getDiffs().asList(); assertEquals(1, fooDiffs.size()); - assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName()); + assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId()); final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo"); INode fooNode_s1 = fsdir.getINode(foo_s1.toString()); @@ -1299,7 +1324,8 @@ public class TestRenameWithSnapshots { INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString()) .asDirectory(); assertFalse(dir2Node.isWithSnapshot()); - ReadOnlyList dir2Children = dir2Node.getChildrenList(null); + ReadOnlyList dir2Children = dir2Node + .getChildrenList(Snapshot.CURRENT_STATE_ID); assertEquals(1, dir2Children.size()); assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName()); } @@ -1327,7 +1353,7 @@ public class TestRenameWithSnapshots { INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory(); INodeDirectory mockDir2 = spy(dir2); doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(), - (Snapshot) anyObject()); + Mockito.anyInt()); INodeDirectory root = 
fsdir.getINode4Write("/").asDirectory(); root.replaceChild(dir2, mockDir2, fsdir.getINodeMap()); @@ -1338,12 +1364,14 @@ public class TestRenameWithSnapshots { // check the current internal details INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir .getINode4Write(sdir1.toString()); - ReadOnlyList dir1Children = dir1Node.getChildrenList(null); + Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1")); + ReadOnlyList dir1Children = dir1Node + .getChildrenList(Snapshot.CURRENT_STATE_ID); assertEquals(1, dir1Children.size()); assertEquals(foo.getName(), dir1Children.get(0).getLocalName()); List dir1Diffs = dir1Node.getDiffs().asList(); assertEquals(1, dir1Diffs.size()); - assertEquals("s1", dir1Diffs.get(0).snapshot.getRoot().getLocalName()); + assertEquals(s1.getId(), dir1Diffs.get(0).getSnapshotId()); // after the undo of rename, the created list of sdir1 should contain // 1 element @@ -1363,7 +1391,8 @@ public class TestRenameWithSnapshots { INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString()) .asDirectory(); assertFalse(dir2Node.isWithSnapshot()); - ReadOnlyList dir2Children = dir2Node.getChildrenList(null); + ReadOnlyList dir2Children = dir2Node + .getChildrenList(Snapshot.CURRENT_STATE_ID); assertEquals(1, dir2Children.size()); assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName()); } @@ -1389,7 +1418,7 @@ public class TestRenameWithSnapshots { INodeDirectory dir3 = fsdir.getINode4Write(sdir3.toString()).asDirectory(); INodeDirectory mockDir3 = spy(dir3); doReturn(false).when(mockDir3).addChild((INode) anyObject(), anyBoolean(), - (Snapshot) anyObject()); + Mockito.anyInt()); INodeDirectory root = fsdir.getINode4Write("/").asDirectory(); root.replaceChild(dir3, mockDir3, fsdir.getINodeMap()); @@ -1400,13 +1429,18 @@ public class TestRenameWithSnapshots { assertFalse(result); // check the current internal details + INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir + 
.getINode4Write(sdir1.toString()); + Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1")); INodeDirectorySnapshottable dir2Node = (INodeDirectorySnapshottable) fsdir .getINode4Write(sdir2.toString()); - ReadOnlyList dir2Children = dir2Node.getChildrenList(null); + Snapshot s2 = dir2Node.getSnapshot(DFSUtil.string2Bytes("s2")); + ReadOnlyList dir2Children = dir2Node + .getChildrenList(Snapshot.CURRENT_STATE_ID); assertEquals(1, dir2Children.size()); List dir2Diffs = dir2Node.getDiffs().asList(); assertEquals(1, dir2Diffs.size()); - assertEquals("s2", Snapshot.getSnapshotName(dir2Diffs.get(0).snapshot)); + assertEquals(s2.getId(), dir2Diffs.get(0).getSnapshotId()); ChildrenDiff childrenDiff = dir2Diffs.get(0).getChildrenDiff(); assertEquals(0, childrenDiff.getList(ListType.DELETED).size()); assertEquals(1, childrenDiff.getList(ListType.CREATED).size()); @@ -1418,7 +1452,7 @@ public class TestRenameWithSnapshots { assertTrue(fooNode instanceof INodeReference.DstReference); List fooDiffs = fooNode.asDirectory().getDiffs().asList(); assertEquals(1, fooDiffs.size()); - assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName()); + assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId()); // create snapshot on sdir2 and rename again hdfs.createSnapshot(sdir2, "s3"); @@ -1428,13 +1462,14 @@ public class TestRenameWithSnapshots { // check internal details again dir2Node = (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir2 .toString()); + Snapshot s3 = dir2Node.getSnapshot(DFSUtil.string2Bytes("s3")); fooNode = fsdir.getINode4Write(foo_dir2.toString()); - dir2Children = dir2Node.getChildrenList(null); + dir2Children = dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID); assertEquals(1, dir2Children.size()); dir2Diffs = dir2Node.getDiffs().asList(); assertEquals(2, dir2Diffs.size()); - assertEquals("s2", Snapshot.getSnapshotName(dir2Diffs.get(0).snapshot)); - assertEquals("s3", Snapshot.getSnapshotName(dir2Diffs.get(1).snapshot)); + 
assertEquals(s2.getId(), dir2Diffs.get(0).getSnapshotId()); + assertEquals(s3.getId(), dir2Diffs.get(1).getSnapshotId()); childrenDiff = dir2Diffs.get(0).getChildrenDiff(); assertEquals(0, childrenDiff.getList(ListType.DELETED).size()); @@ -1452,8 +1487,8 @@ public class TestRenameWithSnapshots { assertTrue(fooNode instanceof INodeReference.DstReference); fooDiffs = fooNode.asDirectory().getDiffs().asList(); assertEquals(2, fooDiffs.size()); - assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName()); - assertEquals("s3", fooDiffs.get(1).snapshot.getRoot().getLocalName()); + assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId()); + assertEquals(s3.getId(), fooDiffs.get(1).getSnapshotId()); } /** @@ -1489,9 +1524,9 @@ public class TestRenameWithSnapshots { INodeDirectory mockDir3 = spy(dir3); // fail the rename but succeed in undo doReturn(false).when(mockDir3).addChild((INode) Mockito.isNull(), - anyBoolean(), (Snapshot) anyObject()); + anyBoolean(), Mockito.anyInt()); Mockito.when(mockDir3.addChild((INode) Mockito.isNotNull(), anyBoolean(), - (Snapshot) anyObject())).thenReturn(false).thenCallRealMethod(); + Mockito.anyInt())).thenReturn(false).thenCallRealMethod(); INodeDirectory root = fsdir.getINode4Write("/").asDirectory(); root.replaceChild(dir3, mockDir3, fsdir.getINodeMap()); foo3Node.setParent(mockDir3); @@ -1551,7 +1586,7 @@ public class TestRenameWithSnapshots { INodeDirectory dir1Node = fsdir.getINode4Write(dir1.toString()) .asDirectory(); List childrenList = ReadOnlyList.Util.asList(dir1Node - .getChildrenList(null)); + .getChildrenList(Snapshot.CURRENT_STATE_ID)); assertEquals(1, childrenList.size()); INode fooNode = childrenList.get(0); assertTrue(fooNode.asDirectory().isWithSnapshot()); @@ -1572,7 +1607,7 @@ public class TestRenameWithSnapshots { assertEquals(3, counts.get(Quota.NAMESPACE)); assertEquals(0, counts.get(Quota.DISKSPACE)); childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory() - .getChildrenList(null)); + 
.getChildrenList(Snapshot.CURRENT_STATE_ID)); assertEquals(1, childrenList.size()); INode subdir2Node = childrenList.get(0); assertSame(dir2Node, subdir2Node.getParent()); @@ -1627,7 +1662,7 @@ public class TestRenameWithSnapshots { INodeDirectory dir1Node = fsdir.getINode4Write(dir1.toString()) .asDirectory(); List childrenList = ReadOnlyList.Util.asList(dir1Node - .getChildrenList(null)); + .getChildrenList(Snapshot.CURRENT_STATE_ID)); assertEquals(1, childrenList.size()); INode fooNode = childrenList.get(0); assertTrue(fooNode.asDirectory().isWithSnapshot()); @@ -1646,7 +1681,7 @@ public class TestRenameWithSnapshots { assertEquals(4, counts.get(Quota.NAMESPACE)); assertEquals(0, counts.get(Quota.DISKSPACE)); childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory() - .getChildrenList(null)); + .getChildrenList(Snapshot.CURRENT_STATE_ID)); assertEquals(1, childrenList.size()); INode subdir2Node = childrenList.get(0); assertTrue(subdir2Node.asDirectory().isWithSnapshot()); @@ -1690,14 +1725,18 @@ public class TestRenameWithSnapshots { } // check + INodeDirectorySnapshottable rootNode = (INodeDirectorySnapshottable) fsdir + .getINode4Write(root.toString()); INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory(); - ReadOnlyList children = fooNode.getChildrenList(null); + ReadOnlyList children = fooNode + .getChildrenList(Snapshot.CURRENT_STATE_ID); assertEquals(1, children.size()); List diffList = fooNode.getDiffs().asList(); assertEquals(1, diffList.size()); DirectoryDiff diff = diffList.get(0); // this diff is generated while renaming - assertEquals(snap1, Snapshot.getSnapshotName(diff.snapshot)); + Snapshot s1 = rootNode.getSnapshot(DFSUtil.string2Bytes(snap1)); + assertEquals(s1.getId(), diff.getSnapshotId()); // after undo, the diff should be empty assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty()); assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty()); @@ -1709,7 +1748,7 @@ public class 
TestRenameWithSnapshots { List barDiffList = barNode.getDiffs().asList(); assertEquals(1, barDiffList.size()); FileDiff barDiff = barDiffList.get(0); - assertEquals(snap1, Snapshot.getSnapshotName(barDiff.snapshot)); + assertEquals(s1.getId(), barDiff.getSnapshotId()); // restart cluster multiple times to make sure the fsimage and edits log are // correct. Note that when loading fsimage, foo and bar will be converted @@ -1941,12 +1980,14 @@ public class TestRenameWithSnapshots { (WithCount) fooRef.asReference().getReferredINode(); assertEquals(1, wc.getReferenceCount()); INodeDirectory fooNode = wc.getReferredINode().asDirectory(); - ReadOnlyList children = fooNode.getChildrenList(null); + ReadOnlyList children = fooNode + .getChildrenList(Snapshot.CURRENT_STATE_ID); assertEquals(1, children.size()); assertEquals(bar.getName(), children.get(0).getLocalName()); List diffList = fooNode.getDiffs().asList(); assertEquals(1, diffList.size()); - assertEquals("s1", Snapshot.getSnapshotName(diffList.get(0).snapshot)); + Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1")); + assertEquals(s1.getId(), diffList.get(0).getSnapshotId()); ChildrenDiff diff = diffList.get(0).getChildrenDiff(); assertEquals(0, diff.getList(ListType.CREATED).size()); assertEquals(0, diff.getList(ListType.DELETED).size()); @@ -2009,14 +2050,16 @@ public class TestRenameWithSnapshots { (WithCount) fooRef.asReference().getReferredINode(); assertEquals(2, wc.getReferenceCount()); INodeDirectory fooNode = wc.getReferredINode().asDirectory(); - ReadOnlyList children = fooNode.getChildrenList(null); + ReadOnlyList children = fooNode + .getChildrenList(Snapshot.CURRENT_STATE_ID); assertEquals(3, children.size()); assertEquals(bar.getName(), children.get(0).getLocalName()); assertEquals(bar2.getName(), children.get(1).getLocalName()); assertEquals(bar3.getName(), children.get(2).getLocalName()); List diffList = fooNode.getDiffs().asList(); assertEquals(1, diffList.size()); - assertEquals("s1", 
Snapshot.getSnapshotName(diffList.get(0).snapshot)); + Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1")); + assertEquals(s1.getId(), diffList.get(0).getSnapshotId()); ChildrenDiff diff = diffList.get(0).getChildrenDiff(); // bar2 and bar3 in the created list assertEquals(2, diff.getList(ListType.CREATED).size()); @@ -2134,11 +2177,12 @@ public class TestRenameWithSnapshots { // recordModification before the rename assertTrue(fooNode.isWithSnapshot()); assertTrue(fooNode.getDiffs().asList().isEmpty()); - INodeDirectory barNode = fooNode.getChildrenList(null).get(0).asDirectory(); + INodeDirectory barNode = fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID) + .get(0).asDirectory(); // bar should also be INodeDirectory (With Snapshot), and both of its diff // list and children list are empty assertTrue(barNode.getDiffs().asList().isEmpty()); - assertTrue(barNode.getChildrenList(null).isEmpty()); + assertTrue(barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty()); restartClusterAndCheckImage(true); } @@ -2210,7 +2254,10 @@ public class TestRenameWithSnapshots { List barDiffList = barNode.getDiffs().asList(); assertEquals(1, barDiffList.size()); DirectoryDiff diff = barDiffList.get(0); - assertEquals("s0", Snapshot.getSnapshotName(diff.snapshot)); + INodeDirectorySnapshottable testNode = + (INodeDirectorySnapshottable) fsdir.getINode4Write(test.toString()); + Snapshot s0 = testNode.getSnapshot(DFSUtil.string2Bytes("s0")); + assertEquals(s0.getId(), diff.getSnapshotId()); // and file should be stored in the deleted list of this snapshot diff assertEquals("file", diff.getChildrenDiff().getList(ListType.DELETED) .get(0).getLocalName()); @@ -2276,7 +2323,7 @@ public class TestRenameWithSnapshots { final Path barInS0 = SnapshotTestHelper.getSnapshotPath(test, "s0", "foo/bar"); INodeDirectory barNode = fsdir.getINode(barInS0.toString()).asDirectory(); - assertEquals(0, barNode.getChildrenList(null).size()); + assertEquals(0, 
barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).size()); List diffList = barNode.getDiffs().asList(); assertEquals(1, diffList.size()); DirectoryDiff diff = diffList.get(0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSetQuotaWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSetQuotaWithSnapshot.java index 82b6ccde081..187f29e2731 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSetQuotaWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSetQuotaWithSnapshot.java @@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.HdfsConstants; @@ -153,7 +154,9 @@ public class TestSetQuotaWithSnapshot { assertTrue(subNode.asDirectory().isWithSnapshot()); List diffList = subNode.asDirectory().getDiffs().asList(); assertEquals(1, diffList.size()); - assertEquals("s2", Snapshot.getSnapshotName(diffList.get(0).snapshot)); + Snapshot s2 = ((INodeDirectorySnapshottable) dirNode).getSnapshot(DFSUtil + .string2Bytes("s2")); + assertEquals(s2.getId(), diffList.get(0).getSnapshotId()); List createdList = diffList.get(0).getChildrenDiff().getList(ListType.CREATED); assertEquals(1, createdList.size()); assertSame(fsdir.getINode4Write(file.toString()), createdList.get(0)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java 
index 237129aba0e..bd7a4c38568 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java @@ -268,7 +268,8 @@ public class TestSnapshotDeletion { (INodeDirectory) fsdir.getINode(snapshotNoChangeDir.toString()); // should still be an INodeDirectory assertEquals(INodeDirectory.class, snapshotNode.getClass()); - ReadOnlyList children = snapshotNode.getChildrenList(null); + ReadOnlyList children = snapshotNode + .getChildrenList(Snapshot.CURRENT_STATE_ID); // check 2 children: noChangeFile and metaChangeFile2 assertEquals(2, children.size()); INode noChangeFileSCopy = children.get(1); @@ -286,11 +287,11 @@ public class TestSnapshotDeletion { // check the replication factor of metaChangeFile2SCopy assertEquals(REPLICATION_1, - metaChangeFile2SCopy.getFileReplication(null)); + metaChangeFile2SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID)); assertEquals(REPLICATION_1, - metaChangeFile2SCopy.getFileReplication(snapshot1)); + metaChangeFile2SCopy.getFileReplication(snapshot1.getId())); assertEquals(REPLICATION, - metaChangeFile2SCopy.getFileReplication(snapshot0)); + metaChangeFile2SCopy.getFileReplication(snapshot0.getId())); // Case 4: delete directory sub // before deleting sub, we first create a new file under sub @@ -316,23 +317,25 @@ public class TestSnapshotDeletion { assertTrue(snapshotNode4Sub.isWithSnapshot()); // the snapshot copy of sub has only one child subsub. 
// newFile should have been destroyed - assertEquals(1, snapshotNode4Sub.getChildrenList(null).size()); + assertEquals(1, snapshotNode4Sub.getChildrenList(Snapshot.CURRENT_STATE_ID) + .size()); // but should have two children, subsub and noChangeDir, when s1 was taken - assertEquals(2, snapshotNode4Sub.getChildrenList(snapshot1).size()); + assertEquals(2, snapshotNode4Sub.getChildrenList(snapshot1.getId()).size()); // check the snapshot copy of subsub, which is contained in the subtree of // sub's snapshot copy - INode snapshotNode4Subsub = snapshotNode4Sub.getChildrenList(null).get(0); + INode snapshotNode4Subsub = snapshotNode4Sub.getChildrenList( + Snapshot.CURRENT_STATE_ID).get(0); assertTrue(snapshotNode4Subsub.asDirectory().isWithSnapshot()); assertTrue(snapshotNode4Sub == snapshotNode4Subsub.getParent()); // check the children of subsub INodeDirectory snapshotSubsubDir = (INodeDirectory) snapshotNode4Subsub; - children = snapshotSubsubDir.getChildrenList(null); + children = snapshotSubsubDir.getChildrenList(Snapshot.CURRENT_STATE_ID); assertEquals(2, children.size()); assertEquals(children.get(0).getLocalName(), metaChangeFile1.getName()); assertEquals(children.get(1).getLocalName(), newFileAfterS0.getName()); // only one child before snapshot s0 - children = snapshotSubsubDir.getChildrenList(snapshot0); + children = snapshotSubsubDir.getChildrenList(snapshot0.getId()); assertEquals(1, children.size()); INode child = children.get(0); assertEquals(child.getLocalName(), metaChangeFile1.getName()); @@ -341,11 +344,11 @@ public class TestSnapshotDeletion { assertTrue(metaChangeFile1SCopy.isWithSnapshot()); assertFalse(metaChangeFile1SCopy.isUnderConstruction()); assertEquals(REPLICATION_1, - metaChangeFile1SCopy.getFileReplication(null)); + metaChangeFile1SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID)); assertEquals(REPLICATION_1, - metaChangeFile1SCopy.getFileReplication(snapshot1)); + metaChangeFile1SCopy.getFileReplication(snapshot1.getId())); 
assertEquals(REPLICATION, - metaChangeFile1SCopy.getFileReplication(snapshot0)); + metaChangeFile1SCopy.getFileReplication(snapshot0.getId())); } /** @@ -474,9 +477,10 @@ public class TestSnapshotDeletion { (INodeDirectorySnapshottable) fsdir.getINode(dir.toString()); Snapshot snapshot0 = dirNode.getSnapshot(DFSUtil.string2Bytes("s0")); assertNull(snapshot0); + Snapshot snapshot1 = dirNode.getSnapshot(DFSUtil.string2Bytes("s1")); DirectoryDiffList diffList = dirNode.getDiffs(); assertEquals(1, diffList.asList().size()); - assertEquals("s1", diffList.getLast().snapshot.getRoot().getLocalName()); + assertEquals(snapshot1.getId(), diffList.getLast().getSnapshotId()); diffList = fsdir.getINode(metaChangeDir.toString()).asDirectory() .getDiffs(); assertEquals(0, diffList.asList().size()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java index 726d1fce5a5..98d2accbb22 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java @@ -101,7 +101,8 @@ public class TestSnapshotRename { List listByTime = srcRoot.getDiffs().asList(); assertEquals(names.length, listByTime.size()); for (int i = 0; i < listByTime.size(); i++) { - assertEquals(names[i], listByTime.get(i).getSnapshot().getRoot().getLocalName()); + Snapshot s = srcRoot.getSnapshotById(listByTime.get(i).getSnapshotId()); + assertEquals(names[i], s.getRoot().getLocalName()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java index 13428e5983e..e1ca26393b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java @@ -150,7 +150,7 @@ public class TestSnapshotReplication { assertEquals(expectedBlockRep, ssInode.getBlockReplication()); // Also check the number derived from INodeFile#getFileReplication assertEquals(snapshotRepMap.get(ss).shortValue(), - ssInode.getFileReplication(iip.getPathSnapshot())); + ssInode.getFileReplication(iip.getPathSnapshotId())); } }