Merge trunk to HDFS-4685.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4685@1556665 13f79535-47bb-0310-9956-ffa450edef68
Chris Nauroth 2014-01-08 23:10:02 +00:00
commit 17ab35314f
73 changed files with 1866 additions and 1047 deletions

View File

@ -412,6 +412,9 @@ Release 2.4.0 - UNRELEASED
HADOOP-10198. DomainSocket: add support for socketpair.
(Colin Patrick McCabe via wang)
HADOOP-10208. Remove duplicate initialization in StringUtils.getStringCollection.
(Benoy Antony via jing9)
OPTIMIZATIONS
HADOOP-9748. Reduce blocking on UGI.ensureInitialized (daryn)

View File

@ -766,8 +766,9 @@ private void createConnection() throws IOException, KeeperException {
zkClient = getNewZooKeeper();
LOG.debug("Created new connection for " + this);
}
void terminateConnection() {
@InterfaceAudience.Private
public void terminateConnection() {
if (zkClient == null) {
return;
}

View File

@ -343,7 +343,6 @@ public static Collection<String> getStringCollection(String str, String delim) {
if (str == null)
return values;
StringTokenizer tokenizer = new StringTokenizer(str, delim);
values = new ArrayList<String>();
while (tokenizer.hasMoreTokens()) {
values.add(tokenizer.nextToken());
}
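
As a reading aid: per HADOOP-10208 above, the line this hunk removes is the redundant re-allocation of values that used to follow the StringTokenizer (the declaration sits just above the hunk context). A minimal sketch of the method as it reads after the fix:

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.StringTokenizer;

// Sketch of StringUtils.getStringCollection after the duplicate
// initialization is removed: "values" is allocated exactly once.
public static Collection<String> getStringCollection(String str, String delim) {
  List<String> values = new ArrayList<String>();
  if (str == null)
    return values;
  StringTokenizer tokenizer = new StringTokenizer(str, delim);
  while (tokenizer.hasMoreTokens()) {
    values.add(tokenizer.nextToken());
  }
  return values;
}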

View File

@ -23,6 +23,7 @@
import org.apache.hadoop.oncrpc.SimpleTcpServer;
import org.apache.hadoop.oncrpc.SimpleUdpServer;
import org.apache.hadoop.portmap.PortmapMapping;
import org.apache.hadoop.util.ShutdownHookManager;
/**
* Main class for starting mountd daemon. This daemon implements the NFS
@ -71,8 +72,24 @@ public void start(boolean register) {
startUDPServer();
startTCPServer();
if (register) {
ShutdownHookManager.get().addShutdownHook(new Unregister(),
SHUTDOWN_HOOK_PRIORITY);
rpcProgram.register(PortmapMapping.TRANSPORT_UDP, udpBoundPort);
rpcProgram.register(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
}
}
/**
* Priority of the mountd shutdown hook.
*/
public static final int SHUTDOWN_HOOK_PRIORITY = 10;
private class Unregister implements Runnable {
@Override
public synchronized void run() {
rpcProgram.unregister(PortmapMapping.TRANSPORT_UDP, udpBoundPort);
rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
}
}
}
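
Mountd above and the NFS3 server in the next file now share the same lifecycle: register with the portmapper on start, and enqueue a ShutdownHookManager hook that unregisters the same transports on JVM exit. A minimal sketch of that pattern using a hypothetical gateway class; the RpcProgram, PortmapMapping, and ShutdownHookManager calls are the ones visible in these hunks:

import org.apache.hadoop.oncrpc.RpcProgram;
import org.apache.hadoop.portmap.PortmapMapping;
import org.apache.hadoop.util.ShutdownHookManager;

// Hypothetical gateway illustrating the register/unregister lifecycle.
class ExampleGateway {
  // Hook priority, matching the value used by Mountd and Nfs3 above.
  static final int SHUTDOWN_HOOK_PRIORITY = 10;

  private final RpcProgram rpcProgram;
  private int tcpBoundPort;

  ExampleGateway(RpcProgram rpcProgram) {
    this.rpcProgram = rpcProgram;
  }

  void start(boolean register) {
    tcpBoundPort = 2049; // assume a TCP server was started and bound here
    if (register) {
      // Enqueue the cleanup before registering, mirroring the diff, so the
      // portmap entry is removed even if startup fails right after this.
      ShutdownHookManager.get().addShutdownHook(new Runnable() {
        @Override
        public void run() {
          rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
        }
      }, SHUTDOWN_HOOK_PRIORITY);
      rpcProgram.register(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
    }
  }
}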

View File

@ -23,6 +23,7 @@
import org.apache.hadoop.oncrpc.RpcProgram;
import org.apache.hadoop.oncrpc.SimpleTcpServer;
import org.apache.hadoop.portmap.PortmapMapping;
import org.apache.hadoop.util.ShutdownHookManager;
/**
* Nfs server. Supports NFS v3 using {@link RpcProgram}.
@ -50,6 +51,8 @@ public void start(boolean register) {
startTCPServer(); // Start TCP server
if (register) {
ShutdownHookManager.get().addShutdownHook(new Unregister(),
SHUTDOWN_HOOK_PRIORITY);
rpcProgram.register(PortmapMapping.TRANSPORT_TCP, nfsBoundPort);
}
}
@ -61,4 +64,16 @@ private void startTCPServer() {
tcpServer.run();
nfsBoundPort = tcpServer.getBoundPort();
}
/**
* Priority of the nfsd shutdown hook.
*/
public static final int SHUTDOWN_HOOK_PRIORITY = 10;
private class Unregister implements Runnable {
@Override
public synchronized void run() {
rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP, nfsBoundPort);
}
}
}

View File

@ -78,23 +78,41 @@ public void register(int transport, int boundPort) {
for (int vers = lowProgVersion; vers <= highProgVersion; vers++) {
PortmapMapping mapEntry = new PortmapMapping(progNumber, vers, transport,
port);
register(mapEntry);
register(mapEntry, true);
}
}
/**
* Unregister this program with the local portmapper.
*/
public void unregister(int transport, int boundPort) {
if (boundPort != port) {
LOG.info("The bound port is " + boundPort
+ ", different from the configured port " + port);
port = boundPort;
}
// Unregister all the program versions with portmapper for a given transport
for (int vers = lowProgVersion; vers <= highProgVersion; vers++) {
PortmapMapping mapEntry = new PortmapMapping(progNumber, vers, transport,
port);
register(mapEntry, false);
}
}
/**
* Register the program with Portmap or Rpcbind
*/
protected void register(PortmapMapping mapEntry) {
XDR mappingRequest = PortmapRequest.create(mapEntry);
protected void register(PortmapMapping mapEntry, boolean set) {
XDR mappingRequest = PortmapRequest.create(mapEntry, set);
SimpleUdpClient registrationClient = new SimpleUdpClient(host, RPCB_PORT,
mappingRequest);
try {
registrationClient.run();
} catch (IOException e) {
LOG.error("Registration failure with " + host + ":" + port
String request = set ? "Registration" : "Unregistration";
LOG.error(request + " failure with " + host + ":" + port
+ ", portmap entry: " + mapEntry);
throw new RuntimeException("Registration failure");
throw new RuntimeException(request + " failure");
}
}

View File

@ -31,13 +31,14 @@ public static PortmapMapping mapping(XDR xdr) {
return PortmapMapping.deserialize(xdr);
}
public static XDR create(PortmapMapping mapping) {
public static XDR create(PortmapMapping mapping, boolean set) {
XDR request = new XDR();
int procedure = set ? RpcProgramPortmap.PMAPPROC_SET
: RpcProgramPortmap.PMAPPROC_UNSET;
RpcCall call = RpcCall.getInstance(
RpcUtil.getNewXid(String.valueOf(RpcProgramPortmap.PROGRAM)),
RpcProgramPortmap.PROGRAM, RpcProgramPortmap.VERSION,
RpcProgramPortmap.PMAPPROC_SET, new CredentialsNone(),
new VerifierNone());
RpcProgramPortmap.PROGRAM, RpcProgramPortmap.VERSION, procedure,
new CredentialsNone(), new VerifierNone());
call.write(request);
return mapping.serialize(request);
}
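
A short usage sketch of the new boolean: the same mapping can now be serialized as either a set or an unset call. The program number (100003, the standard NFS program), version, and port below are illustrative values, not taken from this diff:

import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.portmap.PortmapMapping;
import org.apache.hadoop.portmap.PortmapRequest;

public class PortmapRequestExample {
  public static void main(String[] args) {
    PortmapMapping mapping =
        new PortmapMapping(100003, 3, PortmapMapping.TRANSPORT_TCP, 2049);
    XDR set = PortmapRequest.create(mapping, true);    // PMAPPROC_SET
    XDR unset = PortmapRequest.create(mapping, false); // PMAPPROC_UNSET
  }
}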

View File

@ -19,6 +19,10 @@
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.List;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
@ -29,7 +33,9 @@
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.io.MultipleIOException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ShutdownHookManager;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Objects;
@ -94,7 +100,7 @@ public int hashCode() {
DFSClientCache(Configuration config) {
this(config, DEFAULT_DFS_CLIENT_CACHE_SIZE);
}
DFSClientCache(Configuration config, int clientCache) {
this.config = config;
this.clientCache = CacheBuilder.newBuilder()
@ -107,8 +113,52 @@ public int hashCode() {
.expireAfterAccess(DEFAULT_DFS_INPUTSTREAM_CACHE_TTL, TimeUnit.SECONDS)
.removalListener(inputStreamRemovalListener())
.build(inputStreamLoader());
ShutdownHookManager.get().addShutdownHook(new CacheFinalizer(),
SHUTDOWN_HOOK_PRIORITY);
}
/**
* Priority of the FileSystem shutdown hook.
*/
public static final int SHUTDOWN_HOOK_PRIORITY = 10;
private class CacheFinalizer implements Runnable {
@Override
public synchronized void run() {
try {
closeAll(true);
} catch (IOException e) {
LOG.info("DFSClientCache.closeAll() threw an exception:\n", e);
}
}
}
/**
* Close all DFSClient instances in the Cache.
* @param onlyAutomatic only close those that are marked for automatic closing
*/
synchronized void closeAll(boolean onlyAutomatic) throws IOException {
List<IOException> exceptions = new ArrayList<IOException>();
ConcurrentMap<String, DFSClient> map = clientCache.asMap();
for (Entry<String, DFSClient> item : map.entrySet()) {
final DFSClient client = item.getValue();
if (client != null) {
try {
client.close();
} catch (IOException ioe) {
exceptions.add(ioe);
}
}
}
if (!exceptions.isEmpty()) {
throw MultipleIOException.createIOException(exceptions);
}
}
private CacheLoader<String, DFSClient> clientLoader() {
return new CacheLoader<String, DFSClient>() {
@Override

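The closeAll() method above follows the standard collect-then-rethrow pattern: attempt to close every cached client, gather the failures, and surface them once through MultipleIOException. A self-contained sketch of the same pattern over plain Closeables (a hypothetical helper, not the HDFS class):

import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.io.MultipleIOException;

public final class CloseAllSketch {
  // Close everything; one failure must not prevent closing the rest.
  static void closeAll(Iterable<? extends Closeable> resources)
      throws IOException {
    List<IOException> exceptions = new ArrayList<IOException>();
    for (Closeable c : resources) {
      if (c == null) {
        continue;
      }
      try {
        c.close();
      } catch (IOException ioe) {
        exceptions.add(ioe);
      }
    }
    if (!exceptions.isEmpty()) {
      // Folds the collected exceptions into a single IOException.
      throw MultipleIOException.createIOException(exceptions);
    }
  }
}
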
View File

@ -242,6 +242,9 @@ Trunk (Unreleased)
HDFS-5651. Remove dfs.namenode.caching.enabled and improve CRM locking.
(cmccabe via wang)
HDFS-5715. Use Snapshot ID to indicate the corresponding Snapshot for a
FileDiff/DirectoryDiff. (jing9)
OPTIMIZATIONS
HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)
@ -467,6 +470,11 @@ Trunk (Unreleased)
HDFS-5589. Namenode loops caching and uncaching when data should be
uncached (awang via cmccabe)
HDFS-5724. modifyCacheDirective logging audit log command wrongly as
addCacheDirective (Uma Maheswara Rao G via Colin Patrick McCabe)
HDFS-5726. Fix compilation error in AbstractINodeDiff for JDK7. (jing9)
Release 2.4.0 - UNRELEASED
INCOMPATIBLE CHANGES
@ -1055,6 +1063,9 @@ Release 2.3.0 - UNRELEASED
HDFS-5671. Fix socket leak in DFSInputStream#getBlockReader. (JamesLi via umamahesh)
HDFS-5649. Unregister NFS and Mount service when NFS gateway is shutting down.
(brandonli)
Release 2.2.0 - 2013-10-13
INCOMPATIBLE CHANGES

View File

@ -49,6 +49,7 @@
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import org.apache.hadoop.util.GSet;
import org.apache.hadoop.util.Time;
@ -339,7 +340,8 @@ private void rescanCacheDirectives() {
}
} else if (node.isDirectory()) {
INodeDirectory dir = node.asDirectory();
ReadOnlyList<INode> children = dir.getChildrenList(null);
ReadOnlyList<INode> children = dir
.getChildrenList(Snapshot.CURRENT_STATE_ID);
for (INode child : children) {
if (child.isFile()) {
rescanFile(directive, child.asFile());

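This is the first of many call sites converted by the snapshot-id refactor (HDFS-5715) that runs through the rest of this commit: the nullable Snapshot argument becomes an int, with Snapshot.CURRENT_STATE_ID meaning "read the current, non-snapshot state". A hedged before/after fragment of the call shown above:

// Before this commit, null meant "the current directory contents":
//   ReadOnlyList<INode> children = dir.getChildrenList(null);
// After, an explicit int sentinel replaces the nullable Snapshot:
ReadOnlyList<INode> children =
    dir.getChildrenList(Snapshot.CURRENT_STATE_ID);
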
View File

@ -53,7 +53,6 @@
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CacheDirective;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@ -69,6 +68,7 @@
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
@ -399,7 +399,8 @@ private CacheDirectiveStats computeNeeded(String path, short replication) {
requestedBytes = file.computeFileSize();
} else if (node.isDirectory()) {
INodeDirectory dir = node.asDirectory();
ReadOnlyList<INode> children = dir.getChildrenList(null);
ReadOnlyList<INode> children = dir
.getChildrenList(Snapshot.CURRENT_STATE_ID);
requestedFiles = children.size();
for (INode child : children) {
if (child.isFile()) {

View File

@ -616,14 +616,14 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp)
INode srcChild = srcIIP.getLastINode();
final byte[] srcChildName = srcChild.getLocalNameBytes();
final boolean isSrcInSnapshot = srcChild.isInLatestSnapshot(
srcIIP.getLatestSnapshot());
srcIIP.getLatestSnapshotId());
final boolean srcChildIsReference = srcChild.isReference();
// Record the snapshot on srcChild. After the rename, before any new
// snapshot is taken on the dst tree, changes will be recorded in the latest
// snapshot of the src tree.
if (isSrcInSnapshot) {
srcChild = srcChild.recordModification(srcIIP.getLatestSnapshot());
srcChild = srcChild.recordModification(srcIIP.getLatestSnapshotId());
srcIIP.setLastINode(srcChild);
}
@ -631,17 +631,16 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp)
final INodeReference.WithCount withCount;
Quota.Counts oldSrcCounts = Quota.Counts.newInstance();
int srcRefDstSnapshot = srcChildIsReference ? srcChild.asReference()
.getDstSnapshotId() : Snapshot.INVALID_ID;
.getDstSnapshotId() : Snapshot.CURRENT_STATE_ID;
if (isSrcInSnapshot) {
final INodeReference.WithName withName =
srcIIP.getINode(-2).asDirectory().replaceChild4ReferenceWithName(
srcChild, srcIIP.getLatestSnapshot());
srcChild, srcIIP.getLatestSnapshotId());
withCount = (INodeReference.WithCount) withName.getReferredINode();
srcChild = withName;
srcIIP.setLastINode(srcChild);
// get the counts before rename
withCount.getReferredINode().computeQuotaUsage(oldSrcCounts, true,
Snapshot.INVALID_ID);
withCount.getReferredINode().computeQuotaUsage(oldSrcCounts, true);
} else if (srcChildIsReference) {
// srcChild is reference but srcChild is not in latest snapshot
withCount = (WithCount) srcChild.asReference().getReferredINode();
@ -677,10 +676,9 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp)
toDst = srcChild;
} else {
withCount.getReferredINode().setLocalName(dstChildName);
Snapshot dstSnapshot = dstIIP.getLatestSnapshot();
int dstSnapshotId = dstIIP.getLatestSnapshotId();
final INodeReference.DstReference ref = new INodeReference.DstReference(
dstParent.asDirectory(), withCount,
dstSnapshot == null ? Snapshot.INVALID_ID : dstSnapshot.getId());
dstParent.asDirectory(), withCount, dstSnapshotId);
toDst = ref;
}
@ -692,9 +690,9 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp)
}
// update modification time of dst and the parent of src
final INode srcParent = srcIIP.getINode(-2);
srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshot());
srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshotId());
dstParent = dstIIP.getINode(-2); // refresh dstParent
dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshot());
dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshotId());
// update moved leases with new filename
getFSNamesystem().unprotectedChangeLease(src, dst);
@ -702,7 +700,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp)
if (isSrcInSnapshot) {
// get the counts after rename
Quota.Counts newSrcCounts = srcChild.computeQuotaUsage(
Quota.Counts.newInstance(), false, Snapshot.INVALID_ID);
Quota.Counts.newInstance(), false);
newSrcCounts.subtract(oldSrcCounts);
srcParent.addSpaceConsumed(newSrcCounts.get(Quota.NAMESPACE),
newSrcCounts.get(Quota.DISKSPACE), false);
@ -734,8 +732,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp)
if (isSrcInSnapshot) {
// srcParent must have snapshot feature since isSrcInSnapshot is true
// and src node has been removed from srcParent
srcParent.undoRename4ScrParent(oldSrcChild.asReference(), srcChild,
srcIIP.getLatestSnapshot());
srcParent.undoRename4ScrParent(oldSrcChild.asReference(), srcChild);
} else {
// original srcChild is not in latest snapshot, we only need to add
// the srcChild back
@ -838,7 +835,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
}
if (dstInode.isDirectory()) {
final ReadOnlyList<INode> children = dstInode.asDirectory()
.getChildrenList(null);
.getChildrenList(Snapshot.CURRENT_STATE_ID);
if (!children.isEmpty()) {
error = "rename destination directory is not empty: " + dst;
NameNode.stateChangeLog.warn(
@ -869,31 +866,30 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
INode srcChild = srcIIP.getLastINode();
final byte[] srcChildName = srcChild.getLocalNameBytes();
final boolean isSrcInSnapshot = srcChild.isInLatestSnapshot(
srcIIP.getLatestSnapshot());
srcIIP.getLatestSnapshotId());
final boolean srcChildIsReference = srcChild.isReference();
// Record the snapshot on srcChild. After the rename, before any new
// snapshot is taken on the dst tree, changes will be recorded in the latest
// snapshot of the src tree.
if (isSrcInSnapshot) {
srcChild = srcChild.recordModification(srcIIP.getLatestSnapshot());
srcChild = srcChild.recordModification(srcIIP.getLatestSnapshotId());
srcIIP.setLastINode(srcChild);
}
// check srcChild for reference
final INodeReference.WithCount withCount;
int srcRefDstSnapshot = srcChildIsReference ? srcChild.asReference()
.getDstSnapshotId() : Snapshot.INVALID_ID;
.getDstSnapshotId() : Snapshot.CURRENT_STATE_ID;
Quota.Counts oldSrcCounts = Quota.Counts.newInstance();
if (isSrcInSnapshot) {
final INodeReference.WithName withName = srcIIP.getINode(-2).asDirectory()
.replaceChild4ReferenceWithName(srcChild, srcIIP.getLatestSnapshot());
.replaceChild4ReferenceWithName(srcChild, srcIIP.getLatestSnapshotId());
withCount = (INodeReference.WithCount) withName.getReferredINode();
srcChild = withName;
srcIIP.setLastINode(srcChild);
// get the counts before rename
withCount.getReferredINode().computeQuotaUsage(oldSrcCounts, true,
Snapshot.INVALID_ID);
withCount.getReferredINode().computeQuotaUsage(oldSrcCounts, true);
} else if (srcChildIsReference) {
// srcChild is reference but srcChild is not in latest snapshot
withCount = (WithCount) srcChild.asReference().getReferredINode();
@ -937,10 +933,9 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
toDst = srcChild;
} else {
withCount.getReferredINode().setLocalName(dstChildName);
Snapshot dstSnapshot = dstIIP.getLatestSnapshot();
int dstSnapshotId = dstIIP.getLatestSnapshotId();
final INodeReference.DstReference ref = new INodeReference.DstReference(
dstIIP.getINode(-2).asDirectory(), withCount,
dstSnapshot == null ? Snapshot.INVALID_ID : dstSnapshot.getId());
dstIIP.getINode(-2).asDirectory(), withCount, dstSnapshotId);
toDst = ref;
}
@ -954,9 +949,9 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
}
final INode srcParent = srcIIP.getINode(-2);
srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshot());
srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshotId());
dstParent = dstIIP.getINode(-2);
dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshot());
dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshotId());
// update moved lease with new filename
getFSNamesystem().unprotectedChangeLease(src, dst);
@ -966,8 +961,8 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
undoRemoveDst = false;
BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
List<INode> removedINodes = new ChunkedArrayList<INode>();
filesDeleted = removedDst.cleanSubtree(null,
dstIIP.getLatestSnapshot(), collectedBlocks, removedINodes, true)
filesDeleted = removedDst.cleanSubtree(Snapshot.CURRENT_STATE_ID,
dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes, true)
.get(Quota.NAMESPACE);
getFSNamesystem().removePathAndBlocks(src, collectedBlocks,
removedINodes);
@ -983,7 +978,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
if (isSrcInSnapshot) {
// get the counts after rename
Quota.Counts newSrcCounts = srcChild.computeQuotaUsage(
Quota.Counts.newInstance(), false, Snapshot.INVALID_ID);
Quota.Counts.newInstance(), false);
newSrcCounts.subtract(oldSrcCounts);
srcParent.addSpaceConsumed(newSrcCounts.get(Quota.NAMESPACE),
newSrcCounts.get(Quota.DISKSPACE), false);
@ -1014,8 +1009,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
}
if (srcParent.isWithSnapshot()) {
srcParent.undoRename4ScrParent(oldSrcChild.asReference(), srcChild,
srcIIP.getLatestSnapshot());
srcParent.undoRename4ScrParent(oldSrcChild.asReference(), srcChild);
} else {
// srcParent is not an INodeDirectoryWithSnapshot, we only need to add
// the srcChild back
@ -1026,7 +1020,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
// Rename failed - restore dst
if (dstParent.isDirectory() && dstParent.asDirectory().isWithSnapshot()) {
dstParent.asDirectory().undoRename4DstParent(removedDst,
dstIIP.getLatestSnapshot());
dstIIP.getLatestSnapshotId());
} else {
addLastINodeNoQuotaCheck(dstIIP, removedDst);
}
@ -1090,7 +1084,7 @@ Block[] unprotectedSetReplication(String src, short replication,
updateCount(iip, 0, dsDelta, true);
}
file = file.setFileReplication(replication, iip.getLatestSnapshot(),
file = file.setFileReplication(replication, iip.getLatestSnapshotId(),
inodeMap);
final short newBR = file.getBlockReplication();
@ -1157,7 +1151,7 @@ void unprotectedSetPermission(String src, FsPermission permissions)
if (inode == null) {
throw new FileNotFoundException("File does not exist: " + src);
}
inode.setPermission(permissions, inodesInPath.getLatestSnapshot());
inode.setPermission(permissions, inodesInPath.getLatestSnapshotId());
}
void setOwner(String src, String username, String groupname)
@ -1182,10 +1176,10 @@ void unprotectedSetOwner(String src, String username, String groupname)
throw new FileNotFoundException("File does not exist: " + src);
}
if (username != null) {
inode = inode.setUser(username, inodesInPath.getLatestSnapshot());
inode = inode.setUser(username, inodesInPath.getLatestSnapshotId());
}
if (groupname != null) {
inode.setGroup(groupname, inodesInPath.getLatestSnapshot());
inode.setGroup(groupname, inodesInPath.getLatestSnapshotId());
}
}
@ -1227,12 +1221,12 @@ void unprotectedConcat(String target, String [] srcs, long timestamp)
final INode[] trgINodes = trgIIP.getINodes();
final INodeFile trgInode = trgIIP.getLastINode().asFile();
INodeDirectory trgParent = trgINodes[trgINodes.length-2].asDirectory();
final Snapshot trgLatestSnapshot = trgIIP.getLatestSnapshot();
final int trgLatestSnapshot = trgIIP.getLatestSnapshotId();
final INodeFile [] allSrcInodes = new INodeFile[srcs.length];
for(int i = 0; i < srcs.length; i++) {
final INodesInPath iip = getINodesInPath4Write(srcs[i]);
final Snapshot latest = iip.getLatestSnapshot();
final int latest = iip.getLatestSnapshotId();
final INode inode = iip.getLastINode();
// check if the file is in the latest snapshot
@ -1356,7 +1350,7 @@ boolean isNonEmptyDirectory(String path) throws UnresolvedLinkException {
//not found or not a directory
return false;
}
final Snapshot s = inodesInPath.getPathSnapshot();
final int s = inodesInPath.getPathSnapshotId();
return !inode.asDirectory().getChildrenList(s).isEmpty();
} finally {
readUnlock();
@ -1410,7 +1404,7 @@ long unprotectedDelete(INodesInPath iip, BlocksMapUpdateInfo collectedBlocks,
}
// record modification
final Snapshot latestSnapshot = iip.getLatestSnapshot();
final int latestSnapshot = iip.getLatestSnapshotId();
targetNode = targetNode.recordModification(latestSnapshot);
iip.setLastINode(targetNode);
@ -1431,8 +1425,8 @@ long unprotectedDelete(INodesInPath iip, BlocksMapUpdateInfo collectedBlocks,
if (!targetNode.isInLatestSnapshot(latestSnapshot)) {
targetNode.destroyAndCollectBlocks(collectedBlocks, removedINodes);
} else {
Quota.Counts counts = targetNode.cleanSubtree(null, latestSnapshot,
collectedBlocks, removedINodes, true);
Quota.Counts counts = targetNode.cleanSubtree(Snapshot.CURRENT_STATE_ID,
latestSnapshot, collectedBlocks, removedINodes, true);
parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
-counts.get(Quota.DISKSPACE), true);
removed = counts.get(Quota.NAMESPACE);
@ -1469,7 +1463,7 @@ private static void checkSnapshot(INode target,
}
}
}
for (INode child : targetDir.getChildrenList(null)) {
for (INode child : targetDir.getChildrenList(Snapshot.CURRENT_STATE_ID)) {
checkSnapshot(child, snapshottableDirs);
}
}
@ -1493,7 +1487,7 @@ DirectoryListing getListing(String src, byte[] startAfter,
return getSnapshotsListing(srcs, startAfter);
}
final INodesInPath inodesInPath = rootDir.getLastINodeInPath(srcs, true);
final Snapshot snapshot = inodesInPath.getPathSnapshot();
final int snapshot = inodesInPath.getPathSnapshotId();
final INode targetNode = inodesInPath.getINode(0);
if (targetNode == null)
return null;
@ -1545,7 +1539,8 @@ private DirectoryListing getSnapshotsListing(String src, byte[] startAfter)
final HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
for (int i = 0; i < numOfListing; i++) {
Root sRoot = snapshots.get(i + skipSize).getRoot();
listing[i] = createFileStatus(sRoot.getLocalNameBytes(), sRoot, null);
listing[i] = createFileStatus(sRoot.getLocalNameBytes(), sRoot,
Snapshot.CURRENT_STATE_ID);
}
return new DirectoryListing(
listing, snapshots.size() - skipSize - numOfListing);
@ -1568,7 +1563,7 @@ HdfsFileStatus getFileInfo(String src, boolean resolveLink)
final INodesInPath inodesInPath = rootDir.getLastINodeInPath(srcs, resolveLink);
final INode i = inodesInPath.getINode(0);
return i == null? null: createFileStatus(HdfsFileStatus.EMPTY_NAME, i,
inodesInPath.getPathSnapshot());
inodesInPath.getPathSnapshotId());
} finally {
readUnlock();
}
@ -2131,7 +2126,7 @@ void verifyMaxDirItems(INode[] pathComponents, int pos)
}
final INodeDirectory parent = pathComponents[pos-1].asDirectory();
final int count = parent.getChildrenList(null).size();
final int count = parent.getChildrenList(Snapshot.CURRENT_STATE_ID).size();
if (count >= maxDirItems) {
final MaxDirectoryItemsExceededException e
= new MaxDirectoryItemsExceededException(maxDirItems, count);
@ -2195,7 +2190,7 @@ private boolean addChild(INodesInPath iip, int pos,
final INodeDirectory parent = inodes[pos-1].asDirectory();
boolean added = false;
try {
added = parent.addChild(child, true, iip.getLatestSnapshot());
added = parent.addChild(child, true, iip.getLatestSnapshotId());
} catch (QuotaExceededException e) {
updateCountNoQuotaCheck(iip, pos,
-counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
@ -2230,7 +2225,7 @@ private boolean addLastINodeNoQuotaCheck(INodesInPath inodesInPath, INode i) {
*/
private long removeLastINode(final INodesInPath iip)
throws QuotaExceededException {
final Snapshot latestSnapshot = iip.getLatestSnapshot();
final int latestSnapshot = iip.getLatestSnapshotId();
final INode last = iip.getLastINode();
final INodeDirectory parent = iip.getINode(-2).asDirectory();
if (!parent.removeChild(last, latestSnapshot)) {
@ -2384,7 +2379,7 @@ INodeDirectory unprotectedSetQuota(String src, long nsQuota, long dsQuota)
return null;
}
final Snapshot latest = iip.getLatestSnapshot();
final int latest = iip.getLatestSnapshotId();
dirNode = dirNode.recordModification(latest);
dirNode.setQuota(nsQuota, dsQuota);
return dirNode;
@ -2427,11 +2422,11 @@ long totalInodes() {
* Sets the access time on the file/directory. Logs it in the transaction log.
*/
void setTimes(String src, INode inode, long mtime, long atime, boolean force,
Snapshot latest) throws QuotaExceededException {
int latestSnapshotId) throws QuotaExceededException {
boolean status = false;
writeLock();
try {
status = unprotectedSetTimes(inode, mtime, atime, force, latest);
status = unprotectedSetTimes(inode, mtime, atime, force, latestSnapshotId);
} finally {
writeUnlock();
}
@ -2445,11 +2440,11 @@ boolean unprotectedSetTimes(String src, long mtime, long atime, boolean force)
assert hasWriteLock();
final INodesInPath i = getLastINodeInPath(src);
return unprotectedSetTimes(i.getLastINode(), mtime, atime, force,
i.getLatestSnapshot());
i.getLatestSnapshotId());
}
private boolean unprotectedSetTimes(INode inode, long mtime,
long atime, boolean force, Snapshot latest) throws QuotaExceededException {
long atime, boolean force, int latest) throws QuotaExceededException {
assert hasWriteLock();
boolean status = false;
if (mtime != -1) {
@ -2457,7 +2452,7 @@ private boolean unprotectedSetTimes(INode inode, long mtime,
status = true;
}
if (atime != -1) {
long inodeTime = inode.getAccessTime(null);
long inodeTime = inode.getAccessTime();
// if the last access time update was within the last precision interval, then
// no need to store access time
@ -2497,7 +2492,7 @@ void reset() {
* @throws IOException if any error occurs
*/
private HdfsFileStatus createFileStatus(byte[] path, INode node,
boolean needLocation, Snapshot snapshot) throws IOException {
boolean needLocation, int snapshot) throws IOException {
if (needLocation) {
return createLocatedFileStatus(path, node, snapshot);
} else {
@ -2508,7 +2503,7 @@ private HdfsFileStatus createFileStatus(byte[] path, INode node,
* Create FileStatus by file INode
*/
HdfsFileStatus createFileStatus(byte[] path, INode node,
Snapshot snapshot) {
int snapshot) {
long size = 0; // length is zero for directories
short replication = 0;
long blocksize = 0;
@ -2541,7 +2536,7 @@ HdfsFileStatus createFileStatus(byte[] path, INode node,
* Create FileStatus with location info by file INode
*/
private HdfsLocatedFileStatus createLocatedFileStatus(byte[] path,
INode node, Snapshot snapshot) throws IOException {
INode node, int snapshot) throws IOException {
assert hasReadLock();
long size = 0; // length is zero for directories
short replication = 0;
@ -2553,7 +2548,7 @@ private HdfsLocatedFileStatus createLocatedFileStatus(byte[] path,
replication = fileNode.getFileReplication(snapshot);
blocksize = fileNode.getPreferredBlockSize();
final boolean inSnapshot = snapshot != null;
final boolean inSnapshot = snapshot != Snapshot.CURRENT_STATE_ID;
final boolean isUc = inSnapshot ? false : fileNode.isUnderConstruction();
final long fileSize = !inSnapshot && isUc ?
fileNode.computeFileSizeNotIncludingLastUcBlock() : size;

View File

@ -24,14 +24,12 @@
import java.io.InputStream;
import java.util.Arrays;
import java.util.EnumMap;
import java.util.EnumSet;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@ -80,6 +78,7 @@
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
@ -328,7 +327,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
// add the op into retry cache if necessary
if (toAddRetryCache) {
HdfsFileStatus stat = fsNamesys.dir.createFileStatus(
HdfsFileStatus.EMPTY_NAME, newFile, null);
HdfsFileStatus.EMPTY_NAME, newFile, Snapshot.CURRENT_STATE_ID);
fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId,
addCloseOp.rpcCallId, stat);
}
@ -341,7 +340,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
}
LocatedBlock lb = fsNamesys.prepareFileForWrite(addCloseOp.path,
oldFile, addCloseOp.clientName, addCloseOp.clientMachine, null,
false, iip.getLatestSnapshot(), false);
false, iip.getLatestSnapshotId(), false);
newFile = INodeFile.valueOf(fsDir.getINode(addCloseOp.path),
addCloseOp.path, true);
@ -357,8 +356,8 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
// update the block list.
// Update the salient file attributes.
newFile.setAccessTime(addCloseOp.atime, null);
newFile.setModificationTime(addCloseOp.mtime, null);
newFile.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID);
newFile.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID);
updateBlocks(fsDir, addCloseOp, newFile);
break;
}
@ -376,8 +375,8 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
final INodeFile file = INodeFile.valueOf(iip.getINode(0), addCloseOp.path);
// Update the salient file attributes.
file.setAccessTime(addCloseOp.atime, null);
file.setModificationTime(addCloseOp.mtime, null);
file.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID);
file.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID);
updateBlocks(fsDir, addCloseOp, file);
// Now close the file

View File

@ -53,6 +53,7 @@
import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
@ -770,7 +771,7 @@ private static void updateCountForQuotaRecursively(INodeDirectory dir,
dir.computeQuotaUsage4CurrentDirectory(counts);
for (INode child : dir.getChildrenList(null)) {
for (INode child : dir.getChildrenList(Snapshot.CURRENT_STATE_ID)) {
if (child.isDirectory()) {
updateCountForQuotaRecursively(child.asDirectory(), counts);
} else {

View File

@ -1144,7 +1144,8 @@ private void saveImage(INodeDirectory current, DataOutputStream out,
return;
}
final ReadOnlyList<INode> children = current.getChildrenList(null);
final ReadOnlyList<INode> children = current
.getChildrenList(Snapshot.CURRENT_STATE_ID);
int dirNum = 0;
List<INodeDirectory> snapshotDirs = null;
DirectoryWithSnapshotFeature sf = current.getDirectoryWithSnapshotFeature();

View File

@ -178,7 +178,15 @@
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.server.blockmanagement.*;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.OutOfV1GenerationStampsException;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
@ -1619,11 +1627,11 @@ && doAccessTime && isAccessTimeSupported()) {
if (isReadOp) {
continue;
}
dir.setTimes(src, inode, -1, now, false, iip.getLatestSnapshot());
dir.setTimes(src, inode, -1, now, false, iip.getLatestSnapshotId());
}
}
final long fileSize = iip.isSnapshot() ?
inode.computeFileSize(iip.getPathSnapshot())
inode.computeFileSize(iip.getPathSnapshotId())
: inode.computeFileSizeNotIncludingLastUcBlock();
boolean isUc = inode.isUnderConstruction();
if (iip.isSnapshot()) {
@ -1871,7 +1879,7 @@ private void setTimesInt(String src, long mtime, long atime)
final INodesInPath iip = dir.getINodesInPath4Write(src);
final INode inode = iip.getLastINode();
if (inode != null) {
dir.setTimes(src, inode, mtime, atime, true, iip.getLatestSnapshot());
dir.setTimes(src, inode, mtime, atime, true, iip.getLatestSnapshotId());
resultingStat = getAuditFileInfo(src, false);
} else {
throw new FileNotFoundException("File/Directory " + src + " does not exist.");
@ -2267,7 +2275,7 @@ private LocatedBlock appendFileInternal(FSPermissionChecker pc, String src,
final DatanodeDescriptor clientNode =
blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
return prepareFileForWrite(src, myFile, holder, clientMachine, clientNode,
true, iip.getLatestSnapshot(), logRetryCache);
true, iip.getLatestSnapshotId(), logRetryCache);
} catch (IOException ie) {
NameNode.stateChangeLog.warn("DIR* NameSystem.append: " +ie.getMessage());
throw ie;
@ -2292,7 +2300,7 @@ private LocatedBlock appendFileInternal(FSPermissionChecker pc, String src,
*/
LocatedBlock prepareFileForWrite(String src, INodeFile file,
String leaseHolder, String clientMachine, DatanodeDescriptor clientNode,
boolean writeToEditLog, Snapshot latestSnapshot, boolean logRetryCache)
boolean writeToEditLog, int latestSnapshot, boolean logRetryCache)
throws IOException {
file = file.recordModification(latestSnapshot);
final INodeFile cons = file.toUnderConstruction(leaseHolder, clientMachine,
@ -2927,7 +2935,7 @@ private boolean completeFileInternal(String src,
}
finalizeINodeFileUnderConstruction(src, pendingFile,
iip.getLatestSnapshot());
iip.getLatestSnapshotId());
return true;
}
@ -3636,7 +3644,7 @@ boolean internalReleaseLease(Lease lease, String src,
// then reap lease immediately and close the file.
if(nrCompleteBlocks == nrBlocks) {
finalizeINodeFileUnderConstruction(src, pendingFile,
iip.getLatestSnapshot());
iip.getLatestSnapshotId());
NameNode.stateChangeLog.warn("BLOCK*"
+ " internalReleaseLease: All existing blocks are COMPLETE,"
+ " lease removed, file closed.");
@ -3685,7 +3693,7 @@ boolean internalReleaseLease(Lease lease, String src,
if(penultimateBlockMinReplication &&
blockManager.checkMinReplication(lastBlock)) {
finalizeINodeFileUnderConstruction(src, pendingFile,
iip.getLatestSnapshot());
iip.getLatestSnapshotId());
NameNode.stateChangeLog.warn("BLOCK*"
+ " internalReleaseLease: Committed blocks are minimally replicated,"
+ " lease removed, file closed.");
@ -3716,7 +3724,7 @@ boolean internalReleaseLease(Lease lease, String src,
// We can remove this block and close the file.
pendingFile.removeLastBlock(lastBlock);
finalizeINodeFileUnderConstruction(src, pendingFile,
iip.getLatestSnapshot());
iip.getLatestSnapshotId());
NameNode.stateChangeLog.warn("BLOCK* internalReleaseLease: "
+ "Removed empty last block and closed file.");
return true;
@ -3777,7 +3785,7 @@ private void commitOrCompleteLastBlock(final INodeFile fileINode,
}
private void finalizeINodeFileUnderConstruction(String src,
INodeFile pendingFile, Snapshot latestSnapshot) throws IOException,
INodeFile pendingFile, int latestSnapshot) throws IOException,
UnresolvedLinkException {
assert hasWriteLock();
FileUnderConstructionFeature uc = pendingFile.getFileUnderConstructionFeature();
@ -3977,7 +3985,7 @@ String closeFileCommitBlocks(INodeFile pendingFile, BlockInfo storedBlock)
//remove lease, close file
finalizeINodeFileUnderConstruction(src, pendingFile,
Snapshot.findLatestSnapshot(pendingFile, null));
Snapshot.findLatestSnapshot(pendingFile, Snapshot.CURRENT_STATE_ID));
return src;
}
@ -7131,7 +7139,7 @@ void modifyCacheDirective(CacheDirectiveInfo directive,
getEditLog().logSync();
}
if (isAuditEnabled() && isExternalInvocation()) {
logAuditEvent(success, "addCacheDirective", null, null, null);
logAuditEvent(success, "modifyCacheDirective", null, null, null);
}
RetryCache.setState(cacheEntry, success);
}

View File

@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
@ -29,7 +28,6 @@
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
@ -142,54 +140,54 @@ void checkPermission(String path, INodeDirectory root, boolean doCheckOwner,
// check if (parentAccess != null) && file exists, then check sb
// If resolveLink, the check is performed on the link target.
final INodesInPath inodesInPath = root.getINodesInPath(path, resolveLink);
final Snapshot snapshot = inodesInPath.getPathSnapshot();
final int snapshotId = inodesInPath.getPathSnapshotId();
final INode[] inodes = inodesInPath.getINodes();
int ancestorIndex = inodes.length - 2;
for(; ancestorIndex >= 0 && inodes[ancestorIndex] == null;
ancestorIndex--);
checkTraverse(inodes, ancestorIndex, snapshot);
checkTraverse(inodes, ancestorIndex, snapshotId);
final INode last = inodes[inodes.length - 1];
if (parentAccess != null && parentAccess.implies(FsAction.WRITE)
&& inodes.length > 1 && last != null) {
checkStickyBit(inodes[inodes.length - 2], last, snapshot);
checkStickyBit(inodes[inodes.length - 2], last, snapshotId);
}
if (ancestorAccess != null && inodes.length > 1) {
check(inodes, ancestorIndex, snapshot, ancestorAccess);
check(inodes, ancestorIndex, snapshotId, ancestorAccess);
}
if (parentAccess != null && inodes.length > 1) {
check(inodes, inodes.length - 2, snapshot, parentAccess);
check(inodes, inodes.length - 2, snapshotId, parentAccess);
}
if (access != null) {
check(last, snapshot, access);
check(last, snapshotId, access);
}
if (subAccess != null) {
checkSubAccess(last, snapshot, subAccess);
checkSubAccess(last, snapshotId, subAccess);
}
if (doCheckOwner) {
checkOwner(last, snapshot);
checkOwner(last, snapshotId);
}
}
/** Guarded by {@link FSNamesystem#readLock()} */
private void checkOwner(INode inode, Snapshot snapshot
private void checkOwner(INode inode, int snapshotId
) throws AccessControlException {
if (inode != null && user.equals(inode.getUserName(snapshot))) {
if (inode != null && user.equals(inode.getUserName(snapshotId))) {
return;
}
throw new AccessControlException("Permission denied");
}
/** Guarded by {@link FSNamesystem#readLock()} */
private void checkTraverse(INode[] inodes, int last, Snapshot snapshot
private void checkTraverse(INode[] inodes, int last, int snapshotId
) throws AccessControlException {
for(int j = 0; j <= last; j++) {
check(inodes[j], snapshot, FsAction.EXECUTE);
check(inodes[j], snapshotId, FsAction.EXECUTE);
}
}
/** Guarded by {@link FSNamesystem#readLock()} */
private void checkSubAccess(INode inode, Snapshot snapshot, FsAction access
private void checkSubAccess(INode inode, int snapshotId, FsAction access
) throws AccessControlException {
if (inode == null || !inode.isDirectory()) {
return;
@ -198,9 +196,9 @@ private void checkSubAccess(INode inode, Snapshot snapshot, FsAction access
Stack<INodeDirectory> directories = new Stack<INodeDirectory>();
for(directories.push(inode.asDirectory()); !directories.isEmpty(); ) {
INodeDirectory d = directories.pop();
check(d, snapshot, access);
check(d, snapshotId, access);
for(INode child : d.getChildrenList(snapshot)) {
for(INode child : d.getChildrenList(snapshotId)) {
if (child.isDirectory()) {
directories.push(child.asDirectory());
}
@ -209,23 +207,23 @@ private void checkSubAccess(INode inode, Snapshot snapshot, FsAction access
}
/** Guarded by {@link FSNamesystem#readLock()} */
private void check(INode[] inodes, int i, Snapshot snapshot, FsAction access
private void check(INode[] inodes, int i, int snapshotId, FsAction access
) throws AccessControlException {
check(i >= 0? inodes[i]: null, snapshot, access);
check(i >= 0? inodes[i]: null, snapshotId, access);
}
/** Guarded by {@link FSNamesystem#readLock()} */
private void check(INode inode, Snapshot snapshot, FsAction access
private void check(INode inode, int snapshotId, FsAction access
) throws AccessControlException {
if (inode == null) {
return;
}
FsPermission mode = inode.getFsPermission(snapshot);
FsPermission mode = inode.getFsPermission(snapshotId);
if (user.equals(inode.getUserName(snapshot))) { //user class
if (user.equals(inode.getUserName(snapshotId))) { //user class
if (mode.getUserAction().implies(access)) { return; }
}
else if (groups.contains(inode.getGroupName(snapshot))) { //group class
else if (groups.contains(inode.getGroupName(snapshotId))) { //group class
if (mode.getGroupAction().implies(access)) { return; }
}
else { //other class
@ -236,19 +234,19 @@ else if (groups.contains(inode.getGroupName(snapshot))) { //group class
}
/** Guarded by {@link FSNamesystem#readLock()} */
private void checkStickyBit(INode parent, INode inode, Snapshot snapshot
private void checkStickyBit(INode parent, INode inode, int snapshotId
) throws AccessControlException {
if(!parent.getFsPermission(snapshot).getStickyBit()) {
if(!parent.getFsPermission(snapshotId).getStickyBit()) {
return;
}
// If this user is the directory owner, return
if(parent.getUserName(snapshot).equals(user)) {
if(parent.getUserName(snapshotId).equals(user)) {
return;
}
// if this user is the file owner, return
if(inode.getUserName(snapshot).equals(user)) {
if(inode.getUserName(snapshotId).equals(user)) {
return;
}

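checkSubAccess() above keeps its iterative shape through the refactor: pop a directory off a stack, check the requested FsAction on it, then push its subdirectories. A standalone sketch of that walk with a hypothetical node type (the real code iterates INodeDirectory children from getChildrenList(snapshotId)):

import java.util.ArrayDeque;
import java.util.Deque;

class SubAccessWalk {
  // Hypothetical directory abstraction for illustration only.
  interface Dir {
    Iterable<Dir> subdirs();
    void check(); // would throw AccessControlException on denial
  }

  static void checkSubAccess(Dir root) {
    Deque<Dir> stack = new ArrayDeque<Dir>();
    stack.push(root);
    while (!stack.isEmpty()) {
      Dir d = stack.pop();
      d.check();
      for (Dir child : d.subdirs()) {
        stack.push(child);
      }
    }
  }
}
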
View File

@ -70,98 +70,101 @@ final boolean isRoot() {
}
/** Get the {@link PermissionStatus} */
abstract PermissionStatus getPermissionStatus(Snapshot snapshot);
abstract PermissionStatus getPermissionStatus(int snapshotId);
/** The same as getPermissionStatus(null). */
final PermissionStatus getPermissionStatus() {
return getPermissionStatus(null);
return getPermissionStatus(Snapshot.CURRENT_STATE_ID);
}
/**
* @param snapshot
* if it is not null, get the result from the given snapshot;
* otherwise, get the result from the current inode.
* @param snapshotId
* if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
* from the given snapshot; otherwise, get the result from the
* current inode.
* @return user name
*/
abstract String getUserName(Snapshot snapshot);
abstract String getUserName(int snapshotId);
/** The same as getUserName(null). */
/** The same as getUserName(Snapshot.CURRENT_STATE_ID). */
@Override
public final String getUserName() {
return getUserName(null);
return getUserName(Snapshot.CURRENT_STATE_ID);
}
/** Set user */
abstract void setUser(String user);
/** Set user */
final INode setUser(String user, Snapshot latest)
final INode setUser(String user, int latestSnapshotId)
throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latest);
final INode nodeToUpdate = recordModification(latestSnapshotId);
nodeToUpdate.setUser(user);
return nodeToUpdate;
}
/**
* @param snapshot
* if it is not null, get the result from the given snapshot;
* otherwise, get the result from the current inode.
* @param snapshotId
* if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
* from the given snapshot; otherwise, get the result from the
* current inode.
* @return group name
*/
abstract String getGroupName(Snapshot snapshot);
abstract String getGroupName(int snapshotId);
/** The same as getGroupName(null). */
/** The same as getGroupName(Snapshot.CURRENT_STATE_ID). */
@Override
public final String getGroupName() {
return getGroupName(null);
return getGroupName(Snapshot.CURRENT_STATE_ID);
}
/** Set group */
abstract void setGroup(String group);
/** Set group */
final INode setGroup(String group, Snapshot latest)
final INode setGroup(String group, int latestSnapshotId)
throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latest);
final INode nodeToUpdate = recordModification(latestSnapshotId);
nodeToUpdate.setGroup(group);
return nodeToUpdate;
}
/**
* @param snapshot
* if it is not null, get the result from the given snapshot;
* otherwise, get the result from the current inode.
* @param snapshotId
* if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
* from the given snapshot; otherwise, get the result from the
* current inode.
* @return permission.
*/
abstract FsPermission getFsPermission(Snapshot snapshot);
abstract FsPermission getFsPermission(int snapshotId);
/** The same as getFsPermission(null). */
/** The same as getFsPermission(Snapshot.CURRENT_STATE_ID). */
@Override
public final FsPermission getFsPermission() {
return getFsPermission(null);
return getFsPermission(Snapshot.CURRENT_STATE_ID);
}
/** Set the {@link FsPermission} of this {@link INode} */
abstract void setPermission(FsPermission permission);
/** Set the {@link FsPermission} of this {@link INode} */
INode setPermission(FsPermission permission, Snapshot latest)
INode setPermission(FsPermission permission, int latestSnapshotId)
throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latest);
final INode nodeToUpdate = recordModification(latestSnapshotId);
nodeToUpdate.setPermission(permission);
return nodeToUpdate;
}
/**
* @return if the given snapshot is null, return this;
* otherwise return the corresponding snapshot inode.
* @return if the given snapshot id is {@link Snapshot#CURRENT_STATE_ID},
* return this; otherwise return the corresponding snapshot inode.
*/
public INodeAttributes getSnapshotINode(final Snapshot snapshot) {
public INodeAttributes getSnapshotINode(final int snapshotId) {
return this;
}
/** Is this inode in the latest snapshot? */
public final boolean isInLatestSnapshot(final Snapshot latest) {
if (latest == null) {
public final boolean isInLatestSnapshot(final int latestSnapshotId) {
if (latestSnapshotId == Snapshot.CURRENT_STATE_ID) {
return false;
}
// if parent is a reference node, parent must be a renamed node. We can
@ -173,10 +176,11 @@ public final boolean isInLatestSnapshot(final Snapshot latest) {
if (parentDir == null) { // root
return true;
}
if (!parentDir.isInLatestSnapshot(latest)) {
if (!parentDir.isInLatestSnapshot(latestSnapshotId)) {
return false;
}
final INode child = parentDir.getChild(getLocalNameBytes(), latest);
final INode child = parentDir.getChild(getLocalNameBytes(),
latestSnapshotId);
if (this == child) {
return true;
}
@ -203,21 +207,22 @@ public final boolean isAncestorDirectory(final INodeDirectory dir) {
* operation, or the snapshot belonging to the DST tree.
*
* @param latestInDst
* the latest snapshot in the DST tree above the reference node
* id of the latest snapshot in the DST tree above the reference node
* @return True: the modification should be recorded in the snapshot that
* belongs to the SRC tree. False: the modification should be
* recorded in the snapshot that belongs to the DST tree.
*/
public final boolean shouldRecordInSrcSnapshot(final Snapshot latestInDst) {
public final boolean shouldRecordInSrcSnapshot(final int latestInDst) {
Preconditions.checkState(!isReference());
if (latestInDst == null) {
if (latestInDst == Snapshot.CURRENT_STATE_ID) {
return true;
}
INodeReference withCount = getParentReference();
if (withCount != null) {
int dstSnapshotId = withCount.getParentReference().getDstSnapshotId();
if (dstSnapshotId >= latestInDst.getId()) {
if (dstSnapshotId != Snapshot.CURRENT_STATE_ID
&& dstSnapshotId >= latestInDst) {
return true;
}
}
@ -228,13 +233,14 @@ public final boolean shouldRecordInSrcSnapshot(final Snapshot latestInDst) {
* This inode is being modified. The previous version of the inode needs to
* be recorded in the latest snapshot.
*
* @param latest the latest snapshot that has been taken.
* Note that it is null if no snapshots have been taken.
* @param latestSnapshotId The id of the latest snapshot that has been taken.
* Note that it is {@link Snapshot#CURRENT_STATE_ID}
* if no snapshots have been taken.
* @return The current inode, which usually is the same object of this inode.
* However, in some cases, this inode may be replaced with a new inode
* for maintaining snapshots. The current inode is then the new inode.
*/
abstract INode recordModification(final Snapshot latest)
abstract INode recordModification(final int latestSnapshotId)
throws QuotaExceededException;
/** Check whether it's a reference. */
@ -330,12 +336,13 @@ public INodeSymlink asSymlink() {
* snapshot in its diff list. Recursively clean its children.
* </pre>
*
* @param snapshot
* The snapshot to delete. Null means to delete the current
* @param snapshotId
* The id of the snapshot to delete.
* {@link Snapshot#CURRENT_STATE_ID} means to delete the current
* file/directory.
* @param prior
* The latest snapshot before the to-be-deleted snapshot. When
* deleting a current inode, this parameter captures the latest
* @param priorSnapshotId
* The id of the latest snapshot before the to-be-deleted snapshot.
* When deleting a current inode, this parameter captures the latest
* snapshot.
* @param collectedBlocks
* blocks collected from the descents for further block
@ -345,8 +352,8 @@ public INodeSymlink asSymlink() {
* inodeMap
* @return quota usage delta when deleting a snapshot
*/
public abstract Quota.Counts cleanSubtree(final Snapshot snapshot,
Snapshot prior, BlocksMapUpdateInfo collectedBlocks,
public abstract Quota.Counts cleanSubtree(final int snapshotId,
int priorSnapshotId, BlocksMapUpdateInfo collectedBlocks,
List<INode> removedINodes, boolean countDiffChange)
throws QuotaExceededException;
@ -460,9 +467,10 @@ public final Quota.Counts computeQuotaUsage() {
* @param counts The subtree counts for returning.
* @param useCache Whether to use cached quota usage. Note that
* {@link WithName} node never uses cache for its subtree.
* @param lastSnapshotId {@link Snapshot#INVALID_ID} indicates the computation
* is in the current tree. Otherwise the id indicates
* the computation range for a {@link WithName} node.
* @param lastSnapshotId {@link Snapshot#CURRENT_STATE_ID} indicates the
* computation is in the current tree. Otherwise the id
* indicates the computation range for a
* {@link WithName} node.
* @return The same objects as the counts parameter.
*/
public abstract Quota.Counts computeQuotaUsage(Quota.Counts counts,
@ -470,7 +478,7 @@ public abstract Quota.Counts computeQuotaUsage(Quota.Counts counts,
public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
boolean useCache) {
return computeQuotaUsage(counts, useCache, Snapshot.INVALID_ID);
return computeQuotaUsage(counts, useCache, Snapshot.CURRENT_STATE_ID);
}
/**
@ -558,21 +566,22 @@ public void clear() {
}
/**
* @param snapshot
* if it is not null, get the result from the given snapshot;
* otherwise, get the result from the current inode.
* @param snapshotId
* if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
* from the given snapshot; otherwise, get the result from the
* current inode.
* @return modification time.
*/
abstract long getModificationTime(Snapshot snapshot);
abstract long getModificationTime(int snapshotId);
/** The same as getModificationTime(null). */
/** The same as getModificationTime(Snapshot.CURRENT_STATE_ID). */
@Override
public final long getModificationTime() {
return getModificationTime(null);
return getModificationTime(Snapshot.CURRENT_STATE_ID);
}
/** Update modification time if it is larger than the current value. */
public abstract INode updateModificationTime(long mtime, Snapshot latest)
public abstract INode updateModificationTime(long mtime, int latestSnapshotId)
throws QuotaExceededException;
/** Set the last modification time of inode. */
@ -580,24 +589,25 @@ public abstract INode updateModificationTime(long mtime, Snapshot latest)
/** Set the last modification time of inode. */
public final INode setModificationTime(long modificationTime,
Snapshot latest) throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latest);
int latestSnapshotId) throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latestSnapshotId);
nodeToUpdate.setModificationTime(modificationTime);
return nodeToUpdate;
}
/**
* @param snapshot
* if it is not null, get the result from the given snapshot;
* otherwise, get the result from the current inode.
* @param snapshotId
* if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
* from the given snapshot; otherwise, get the result from the
* current inode.
* @return access time
*/
abstract long getAccessTime(Snapshot snapshot);
abstract long getAccessTime(int snapshotId);
/** The same as getAccessTime(null). */
/** The same as getAccessTime(Snapshot.CURRENT_STATE_ID). */
@Override
public final long getAccessTime() {
return getAccessTime(null);
return getAccessTime(Snapshot.CURRENT_STATE_ID);
}
/**
@ -608,9 +618,9 @@ public final long getAccessTime() {
/**
* Set last access time of inode.
*/
public final INode setAccessTime(long accessTime, Snapshot latest)
public final INode setAccessTime(long accessTime, int latestSnapshotId)
throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latest);
final INode nodeToUpdate = recordModification(latestSnapshotId);
nodeToUpdate.setAccessTime(accessTime);
return nodeToUpdate;
}
@ -679,13 +689,15 @@ public final int hashCode() {
@VisibleForTesting
public final StringBuffer dumpTreeRecursively() {
final StringWriter out = new StringWriter();
dumpTreeRecursively(new PrintWriter(out, true), new StringBuilder(), null);
dumpTreeRecursively(new PrintWriter(out, true), new StringBuilder(),
Snapshot.CURRENT_STATE_ID);
return out.getBuffer();
}
@VisibleForTesting
public final void dumpTreeRecursively(PrintStream out) {
dumpTreeRecursively(new PrintWriter(out, true), new StringBuilder(), null);
dumpTreeRecursively(new PrintWriter(out, true), new StringBuilder(),
Snapshot.CURRENT_STATE_ID);
}
/**
@ -694,7 +706,7 @@ public final void dumpTreeRecursively(PrintStream out) {
*/
@VisibleForTesting
public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
Snapshot snapshot) {
int snapshotId) {
out.print(prefix);
out.print(" ");
final String name = getLocalName();
@ -703,7 +715,7 @@ public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
out.print(getObjectString());
out.print("), ");
out.print(getParentString());
out.print(", " + getPermissionStatus(snapshot));
out.print(", " + getPermissionStatus(snapshotId));
}
/**

View File

@ -204,9 +204,9 @@ public DirectoryDiffList getDiffs() {
}
@Override
public INodeDirectoryAttributes getSnapshotINode(Snapshot snapshot) {
public INodeDirectoryAttributes getSnapshotINode(int snapshotId) {
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
return sf == null ? this : sf.getDiffs().getSnapshotINode(snapshot, this);
return sf == null ? this : sf.getDiffs().getSnapshotINode(snapshotId, this);
}
@Override
@ -217,12 +217,13 @@ public String toDetailString() {
/** Replace itself with an {@link INodeDirectorySnapshottable}. */
public INodeDirectorySnapshottable replaceSelf4INodeDirectorySnapshottable(
Snapshot latest, final INodeMap inodeMap) throws QuotaExceededException {
int latestSnapshotId, final INodeMap inodeMap)
throws QuotaExceededException {
Preconditions.checkState(!(this instanceof INodeDirectorySnapshottable),
"this is already an INodeDirectorySnapshottable, this=%s", this);
final INodeDirectorySnapshottable s = new INodeDirectorySnapshottable(this);
replaceSelf(s, inodeMap).getDirectoryWithSnapshotFeature().getDiffs()
.saveSelf2Snapshot(latest, s, this);
.saveSelf2Snapshot(latestSnapshotId, s, this);
return s;
}
@ -289,8 +290,8 @@ public void replaceChild(INode oldChild, final INode newChild,
}
INodeReference.WithName replaceChild4ReferenceWithName(INode oldChild,
Snapshot latest) {
Preconditions.checkArgument(latest != null);
int latestSnapshotId) {
Preconditions.checkArgument(latestSnapshotId != Snapshot.CURRENT_STATE_ID);
if (oldChild instanceof INodeReference.WithName) {
return (INodeReference.WithName)oldChild;
}
@ -304,22 +305,23 @@ INodeReference.WithName replaceChild4ReferenceWithName(INode oldChild,
withCount = new INodeReference.WithCount(null, oldChild);
}
final INodeReference.WithName ref = new INodeReference.WithName(this,
withCount, oldChild.getLocalNameBytes(), latest.getId());
withCount, oldChild.getLocalNameBytes(), latestSnapshotId);
replaceChild(oldChild, ref, null);
return ref;
}
@Override
public INodeDirectory recordModification(Snapshot latest)
public INodeDirectory recordModification(int latestSnapshotId)
throws QuotaExceededException {
if (isInLatestSnapshot(latest) && !shouldRecordInSrcSnapshot(latest)) {
if (isInLatestSnapshot(latestSnapshotId)
&& !shouldRecordInSrcSnapshot(latestSnapshotId)) {
// add snapshot feature if necessary
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
if (sf == null) {
sf = addSnapshotFeature(null);
}
// record self in the diff list if necessary
sf.getDiffs().saveSelf2Snapshot(latest, this, null);
sf.getDiffs().saveSelf2Snapshot(latestSnapshotId, this, null);
}
return this;
}
@ -329,9 +331,9 @@ public INodeDirectory recordModification(Snapshot latest)
*
* @return the child inode, which may be replaced.
*/
public INode saveChild2Snapshot(final INode child, final Snapshot latest,
public INode saveChild2Snapshot(final INode child, final int latestSnapshotId,
final INode snapshotCopy) throws QuotaExceededException {
if (latest == null) {
if (latestSnapshotId == Snapshot.CURRENT_STATE_ID) {
return child;
}
@ -340,42 +342,45 @@ public INode saveChild2Snapshot(final INode child, final Snapshot latest,
if (sf == null) {
sf = this.addSnapshotFeature(null);
}
return sf.saveChild2Snapshot(this, child, latest, snapshotCopy);
return sf.saveChild2Snapshot(this, child, latestSnapshotId, snapshotCopy);
}
/**
* @param name the name of the child
* @param snapshot
* if it is not null, get the result from the given snapshot;
* otherwise, get the result from the current directory.
* @param snapshotId
* if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
* from the corresponding snapshot; otherwise, get the result from
* the current directory.
* @return the child inode.
*/
public INode getChild(byte[] name, Snapshot snapshot) {
public INode getChild(byte[] name, int snapshotId) {
DirectoryWithSnapshotFeature sf;
if (snapshot == null || (sf = getDirectoryWithSnapshotFeature()) == null) {
if (snapshotId == Snapshot.CURRENT_STATE_ID ||
(sf = getDirectoryWithSnapshotFeature()) == null) {
ReadOnlyList<INode> c = getCurrentChildrenList();
final int i = ReadOnlyList.Util.binarySearch(c, name);
return i < 0 ? null : c.get(i);
}
return sf.getChild(this, name, snapshot);
return sf.getChild(this, name, snapshotId);
}
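
getChild above resolves a name against the current children with ReadOnlyList.Util.binarySearch and treats a negative index as "absent". The same contract can be reproduced with Collections.binarySearch over a sorted list; this standalone sketch uses String names instead of the byte[] names HDFS compares:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Standalone sketch of the sorted-children lookup in getChild;
// Strings stand in for the byte[] names used by HDFS.
public class ChildLookupSketch {
  static String find(List<String> sortedChildren, String name) {
    final int i = Collections.binarySearch(sortedChildren, name);
    return i < 0 ? null : sortedChildren.get(i); // negative index means absent
  }

  public static void main(String[] args) {
    List<String> children = Arrays.asList("a", "b", "d"); // kept sorted
    System.out.println(find(children, "b")); // b
    System.out.println(find(children, "c")); // null
  }
}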
/**
* @param snapshot
* if it is not null, get the result from the given snapshot;
* otherwise, get the result from the current directory.
* @param snapshotId
* if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
* from the corresponding snapshot; otherwise, get the result from
* the current directory.
* @return the current children list if the specified snapshot id is
* {@link Snapshot#CURRENT_STATE_ID}; otherwise, return the children list
* corresponding to the snapshot.
* Note that the returned list is never null.
*/
public ReadOnlyList<INode> getChildrenList(final Snapshot snapshot) {
public ReadOnlyList<INode> getChildrenList(final int snapshotId) {
DirectoryWithSnapshotFeature sf;
if (snapshot == null
if (snapshotId == Snapshot.CURRENT_STATE_ID
|| (sf = this.getDirectoryWithSnapshotFeature()) == null) {
return getCurrentChildrenList();
}
return sf.getChildrenList(this, snapshot);
return sf.getChildrenList(this, snapshotId);
}
private ReadOnlyList<INode> getCurrentChildrenList() {
@ -450,15 +455,15 @@ static int nextChild(ReadOnlyList<INode> children, byte[] name) {
/**
* Remove the specified child from this directory.
*/
public boolean removeChild(INode child, Snapshot latest)
public boolean removeChild(INode child, int latestSnapshotId)
throws QuotaExceededException {
if (isInLatestSnapshot(latest)) {
if (isInLatestSnapshot(latestSnapshotId)) {
// create snapshot feature if necessary
DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
if (sf == null) {
sf = this.addSnapshotFeature(null);
}
return sf.removeChild(this, child, latest);
return sf.removeChild(this, child, latestSnapshotId);
}
return removeChild(child);
}
@ -493,24 +498,24 @@ public boolean removeChild(final INode child) {
* otherwise, return true;
*/
public boolean addChild(INode node, final boolean setModTime,
final Snapshot latest) throws QuotaExceededException {
final int latestSnapshotId) throws QuotaExceededException {
final int low = searchChildren(node.getLocalNameBytes());
if (low >= 0) {
return false;
}
if (isInLatestSnapshot(latest)) {
if (isInLatestSnapshot(latestSnapshotId)) {
// create snapshot feature if necessary
DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
if (sf == null) {
sf = this.addSnapshotFeature(null);
}
return sf.addChild(this, node, setModTime, latest);
return sf.addChild(this, node, setModTime, latestSnapshotId);
}
addChild(node, low);
if (setModTime) {
// update modification time of the parent directory
updateModificationTime(node.getModificationTime(), latest);
updateModificationTime(node.getModificationTime(), latestSnapshotId);
}
return true;
}
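
recordModification, removeChild and addChild all share one interception shape: if the directory is captured by the latest snapshot, a DirectoryWithSnapshotFeature is attached lazily and the mutation is routed through it so the pre-change state lands in the diff; otherwise the children list is mutated directly. Reduced to a sketch, with Feature and the boolean flag as illustrative stand-ins:

// Interception shape shared by addChild/removeChild above; Feature
// and inLatestSnapshot are stand-ins, not the HDFS types.
class MutateSketch {
  private Feature feature;          // snapshot feature, created lazily
  private boolean inLatestSnapshot; // stands in for isInLatestSnapshot(id)

  boolean addChild(String node, int latestSnapshotId) {
    if (inLatestSnapshot) {
      if (feature == null) {
        feature = new Feature(); // attach on first snapshot-visible change
      }
      // the feature also records the change in the diff of latestSnapshotId
      return feature.addChild(node, latestSnapshotId);
    }
    return addDirectly(node); // no snapshot bookkeeping needed
  }

  private boolean addDirectly(String node) { return true; }

  static class Feature {
    boolean addChild(String node, int sid) { return true; }
  }
}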
@ -548,10 +553,9 @@ public Quota.Counts computeQuotaUsage(Quota.Counts counts, boolean useCache,
// we are computing the quota usage for a specific snapshot here, i.e., the
// computation only includes files/directories that exist at the time of the
// given snapshot
if (sf != null && lastSnapshotId != Snapshot.INVALID_ID
if (sf != null && lastSnapshotId != Snapshot.CURRENT_STATE_ID
&& !(useCache && isQuotaSet())) {
Snapshot lastSnapshot = sf.getDiffs().getSnapshotById(lastSnapshotId);
ReadOnlyList<INode> childrenList = getChildrenList(lastSnapshot);
ReadOnlyList<INode> childrenList = getChildrenList(lastSnapshotId);
for (INode child : childrenList) {
child.computeQuotaUsage(counts, useCache, lastSnapshotId);
}
@ -607,7 +611,7 @@ public ContentSummaryComputationContext computeContentSummary(
ContentSummaryComputationContext computeDirectoryContentSummary(
ContentSummaryComputationContext summary) {
ReadOnlyList<INode> childrenList = getChildrenList(null);
ReadOnlyList<INode> childrenList = getChildrenList(Snapshot.CURRENT_STATE_ID);
// Explicit traversing is done to enable repositioning after relinquishing
// and reacquiring locks.
for (int i = 0; i < childrenList.size(); i++) {
@ -629,7 +633,7 @@ ContentSummaryComputationContext computeDirectoryContentSummary(
break;
}
// Obtain the children list again since it may have been modified.
childrenList = getChildrenList(null);
childrenList = getChildrenList(Snapshot.CURRENT_STATE_ID);
// Reposition in case the children list is changed. Decrement by 1
// since it will be incremented when the loop continues.
i = nextChild(childrenList, childName) - 1;
@ -668,21 +672,16 @@ ContentSummaryComputationContext computeDirectoryContentSummary(
* The reference node to be removed/replaced
* @param newChild
* The node to be added back
* @param latestSnapshot
* The latest snapshot. Note this may not be the last snapshot in the
* diff list, since the src tree of the current rename operation
* may be the dst tree of a previous rename.
* @throws QuotaExceededException should not throw this exception
*/
public void undoRename4ScrParent(final INodeReference oldChild,
final INode newChild, Snapshot latestSnapshot)
throws QuotaExceededException {
final INode newChild) throws QuotaExceededException {
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
Preconditions.checkState(sf != null,
"Directory does not have snapshot feature");
sf.getDiffs().removeChild(ListType.DELETED, oldChild);
sf.getDiffs().replaceChild(ListType.CREATED, oldChild, newChild);
addChild(newChild, true, null);
addChild(newChild, true, Snapshot.CURRENT_STATE_ID);
}
/**
@ -691,16 +690,14 @@ public void undoRename4ScrParent(final INodeReference oldChild,
* and delete possible record in the deleted list.
*/
public void undoRename4DstParent(final INode deletedChild,
Snapshot latestSnapshot) throws QuotaExceededException {
int latestSnapshotId) throws QuotaExceededException {
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
Preconditions.checkState(sf != null,
"Directory does not have snapshot feature");
boolean removeDeletedChild = sf.getDiffs().removeChild(ListType.DELETED,
deletedChild);
// pass null for inodeMap since the parent node will not get replaced when
// undoing rename
final boolean added = addChild(deletedChild, true, removeDeletedChild ? null
: latestSnapshot);
int sid = removeDeletedChild ? Snapshot.CURRENT_STATE_ID : latestSnapshotId;
final boolean added = addChild(deletedChild, true, sid);
// update quota usage if adding succeeds and the old child has not
// been stored in deleted list before
if (added && !removeDeletedChild) {
@ -722,8 +719,8 @@ public void clear() {
}
/** Call cleanSubtree(..) recursively down the subtree. */
public Quota.Counts cleanSubtreeRecursively(final Snapshot snapshot,
Snapshot prior, final BlocksMapUpdateInfo collectedBlocks,
public Quota.Counts cleanSubtreeRecursively(final int snapshot,
int prior, final BlocksMapUpdateInfo collectedBlocks,
final List<INode> removedINodes, final Map<INode, INode> excludedNodes,
final boolean countDiffChange) throws QuotaExceededException {
Quota.Counts counts = Quota.Counts.newInstance();
@ -732,9 +729,10 @@ public Quota.Counts cleanSubtreeRecursively(final Snapshot snapshot,
// to its latest previous snapshot. (besides, we also need to consider nodes
// created after prior but before snapshot. this will be done in
// DirectoryWithSnapshotFeature)
Snapshot s = snapshot != null && prior != null ? prior : snapshot;
int s = snapshot != Snapshot.CURRENT_STATE_ID
&& prior != Snapshot.NO_SNAPSHOT_ID ? prior : snapshot;
for (INode child : getChildrenList(s)) {
if (snapshot != null && excludedNodes != null
if (snapshot != Snapshot.CURRENT_STATE_ID && excludedNodes != null
&& excludedNodes.containsKey(child)) {
continue;
} else {
@ -753,7 +751,7 @@ public void destroyAndCollectBlocks(final BlocksMapUpdateInfo collectedBlocks,
if (sf != null) {
sf.clear(this, collectedBlocks, removedINodes);
}
for (INode child : getChildrenList(null)) {
for (INode child : getChildrenList(Snapshot.CURRENT_STATE_ID)) {
child.destroyAndCollectBlocks(collectedBlocks, removedINodes);
}
clear();
@ -761,18 +759,19 @@ public void destroyAndCollectBlocks(final BlocksMapUpdateInfo collectedBlocks,
}
@Override
public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
public Quota.Counts cleanSubtree(final int snapshotId, int priorSnapshotId,
final BlocksMapUpdateInfo collectedBlocks,
final List<INode> removedINodes, final boolean countDiffChange)
throws QuotaExceededException {
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
// there is snapshot data
if (sf != null) {
return sf.cleanDirectory(this, snapshot, prior, collectedBlocks,
removedINodes, countDiffChange);
return sf.cleanDirectory(this, snapshotId, priorSnapshotId,
collectedBlocks, removedINodes, countDiffChange);
}
// there is no snapshot data
if (prior == null && snapshot == null) {
if (priorSnapshotId == Snapshot.NO_SNAPSHOT_ID
&& snapshotId == Snapshot.CURRENT_STATE_ID) {
// destroy the whole subtree and collect blocks that should be deleted
Quota.Counts counts = Quota.Counts.newInstance();
this.computeQuotaUsage(counts, true);
@ -780,7 +779,7 @@ public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
return counts;
} else {
// process recursively down the subtree
Quota.Counts counts = cleanSubtreeRecursively(snapshot, prior,
Quota.Counts counts = cleanSubtreeRecursively(snapshotId, priorSnapshotId,
collectedBlocks, removedINodes, null, countDiffChange);
if (isQuotaSet()) {
getDirectoryWithQuotaFeature().addSpaceConsumed2Cache(
@ -816,7 +815,7 @@ && getQuotaCounts().equals(other.getQuotaCounts())
@VisibleForTesting
@Override
public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
final Snapshot snapshot) {
final int snapshot) {
super.dumpTreeRecursively(out, prefix, snapshot);
out.print(", childrenSize=" + getChildrenList(snapshot).size());
final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
@ -824,7 +823,7 @@ public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
out.print(", " + q);
}
if (this instanceof Snapshot.Root) {
out.print(", snapshotId=" + snapshot.getId());
out.print(", snapshotId=" + snapshot);
}
out.println();
@ -869,7 +868,7 @@ protected static void dumpTreeRecursively(PrintWriter out,
for(final Iterator<SnapshotAndINode> i = subs.iterator(); i.hasNext();) {
final SnapshotAndINode pair = i.next();
prefix.append(i.hasNext()? DUMPTREE_EXCEPT_LAST_ITEM: DUMPTREE_LAST_ITEM);
pair.inode.dumpTreeRecursively(out, prefix, pair.snapshot);
pair.inode.dumpTreeRecursively(out, prefix, pair.snapshotId);
prefix.setLength(prefix.length() - 2);
}
}
@ -877,20 +876,16 @@ protected static void dumpTreeRecursively(PrintWriter out,
/** A pair of Snapshot and INode objects. */
protected static class SnapshotAndINode {
public final Snapshot snapshot;
public final int snapshotId;
public final INode inode;
public SnapshotAndINode(Snapshot snapshot, INode inode) {
this.snapshot = snapshot;
public SnapshotAndINode(int snapshot, INode inode) {
this.snapshotId = snapshot;
this.inode = inode;
}
public SnapshotAndINode(Snapshot snapshot) {
this(snapshot, snapshot.getRoot());
}
}
public final int getChildrenNum(final Snapshot snapshot) {
return getChildrenList(snapshot).size();
public final int getChildrenNum(final int snapshotId) {
return getChildrenList(snapshotId).size();
}
}

View File

@ -17,6 +17,9 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID;
import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.NO_SNAPSHOT_ID;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintWriter;
@ -272,26 +275,27 @@ public String toDetailString() {
}
@Override
public INodeFileAttributes getSnapshotINode(final Snapshot snapshot) {
public INodeFileAttributes getSnapshotINode(final int snapshotId) {
FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
if (sf != null) {
return sf.getDiffs().getSnapshotINode(snapshot, this);
return sf.getDiffs().getSnapshotINode(snapshotId, this);
} else {
return this;
}
}
@Override
public INodeFile recordModification(final Snapshot latest)
public INodeFile recordModification(final int latestSnapshotId)
throws QuotaExceededException {
if (isInLatestSnapshot(latest) && !shouldRecordInSrcSnapshot(latest)) {
if (isInLatestSnapshot(latestSnapshotId)
&& !shouldRecordInSrcSnapshot(latestSnapshotId)) {
// the file is in a snapshot; create a snapshot feature if it does not have one
FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
if (sf == null) {
sf = addSnapshotFeature(null);
}
// record self in the diff list if necessary
sf.getDiffs().saveSelf2Snapshot(latest, this, null);
sf.getDiffs().saveSelf2Snapshot(latestSnapshotId, this, null);
}
return this;
}
@ -307,23 +311,22 @@ public FileDiffList getDiffs() {
/* End of Snapshot Feature */
/** @return the replication factor of the file. */
public final short getFileReplication(Snapshot snapshot) {
if (snapshot != null) {
public final short getFileReplication(int snapshot) {
if (snapshot != CURRENT_STATE_ID) {
return getSnapshotINode(snapshot).getFileReplication();
}
return HeaderFormat.getReplication(header);
}
/** The same as getFileReplication(CURRENT_STATE_ID). */
@Override // INodeFileAttributes
public final short getFileReplication() {
return getFileReplication(null);
return getFileReplication(CURRENT_STATE_ID);
}
@Override // BlockCollection
public short getBlockReplication() {
short max = getFileReplication(null);
short max = getFileReplication(CURRENT_STATE_ID);
FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
if (sf != null) {
short maxInSnapshot = sf.getMaxBlockRepInDiffs();
@ -341,9 +344,10 @@ public final void setFileReplication(short replication) {
}
/** Set the replication factor of this file. */
public final INodeFile setFileReplication(short replication, Snapshot latest,
final INodeMap inodeMap) throws QuotaExceededException {
final INodeFile nodeToUpdate = recordModification(latest);
public final INodeFile setFileReplication(short replication,
int latestSnapshotId, final INodeMap inodeMap)
throws QuotaExceededException {
final INodeFile nodeToUpdate = recordModification(latestSnapshotId);
nodeToUpdate.setFileReplication(replication);
return nodeToUpdate;
}
@ -421,22 +425,22 @@ public void setBlocks(BlockInfo[] blocks) {
}
@Override
public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
public Quota.Counts cleanSubtree(final int snapshot, int priorSnapshotId,
final BlocksMapUpdateInfo collectedBlocks,
final List<INode> removedINodes, final boolean countDiffChange)
throws QuotaExceededException {
FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
if (sf != null) {
return sf.cleanFile(this, snapshot, prior, collectedBlocks,
return sf.cleanFile(this, snapshot, priorSnapshotId, collectedBlocks,
removedINodes, countDiffChange);
}
Quota.Counts counts = Quota.Counts.newInstance();
if (snapshot == null && prior == null) {
if (snapshot == CURRENT_STATE_ID && priorSnapshotId == NO_SNAPSHOT_ID) {
// this only happens when deleting the current file and the file is not
// in any snapshot
computeQuotaUsage(counts, false);
destroyAndCollectBlocks(collectedBlocks, removedINodes);
} else if (snapshot == null && prior != null) {
} else if (snapshot == CURRENT_STATE_ID && priorSnapshotId != NO_SNAPSHOT_ID) {
// when deleting the current file and the file is in snapshot, we should
// clean the 0-sized block if the file is UC
FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
@ -480,17 +484,18 @@ public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
if (sf != null) {
FileDiffList fileDiffList = sf.getDiffs();
Snapshot last = fileDiffList.getLastSnapshot();
int last = fileDiffList.getLastSnapshotId();
List<FileDiff> diffs = fileDiffList.asList();
if (lastSnapshotId == Snapshot.INVALID_ID || last == null) {
if (lastSnapshotId == Snapshot.CURRENT_STATE_ID
|| last == Snapshot.CURRENT_STATE_ID) {
nsDelta += diffs.size();
dsDelta = diskspaceConsumed();
} else if (last.getId() < lastSnapshotId) {
} else if (last < lastSnapshotId) {
dsDelta = computeFileSize(true, false) * getFileReplication();
} else {
Snapshot s = fileDiffList.getSnapshotById(lastSnapshotId);
dsDelta = diskspaceConsumed(s);
int sid = fileDiffList.getSnapshotById(lastSnapshotId);
dsDelta = diskspaceConsumed(sid);
}
} else {
dsDelta = diskspaceConsumed();
@ -501,7 +506,7 @@ public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
}
@Override
public final ContentSummaryComputationContext computeContentSummary(
final ContentSummaryComputationContext summary) {
computeContentSummary4Snapshot(summary.getCounts());
computeContentSummary4Current(summary.getCounts());
@ -540,23 +545,21 @@ private void computeContentSummary4Current(final Content.Counts counts) {
/** The same as computeFileSize(CURRENT_STATE_ID). */
public final long computeFileSize() {
return computeFileSize(null);
return computeFileSize(CURRENT_STATE_ID);
}
/**
* Compute file size of the current file if the given snapshot id is
* {@link Snapshot#CURRENT_STATE_ID}; otherwise, get the file size from
* the given snapshot.
*/
public final long computeFileSize(Snapshot snapshot) {
public final long computeFileSize(int snapshotId) {
FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
if (snapshot != null && sf != null) {
final FileDiff d = sf.getDiffs().getDiff(
snapshot);
if (snapshotId != CURRENT_STATE_ID && sf != null) {
final FileDiff d = sf.getDiffs().getDiffById(snapshotId);
if (d != null) {
return d.getFileSize();
}
}
return computeFileSize(true, false);
}
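
computeFileSize(int) consults the snapshot diff first: a FileDiff that froze a length for the given id wins, and only otherwise is the live block list measured. A self-contained sketch of that fallback, with a plain Map standing in for FileDiffList.getDiffById:

import java.util.HashMap;
import java.util.Map;

// Sketch of the snapshot-first, live-state-fallback lookup in
// computeFileSize; the Map stands in for FileDiffList.getDiffById.
public class FileSizeSketch {
  static final int CURRENT_STATE_ID = Integer.MAX_VALUE; // stand-in sentinel
  private long liveSize = 4096;                          // current file length
  private final Map<Integer, Long> diffSizes = new HashMap<>();

  long computeFileSize(int snapshotId) {
    if (snapshotId != CURRENT_STATE_ID) {
      Long frozen = diffSizes.get(snapshotId);
      if (frozen != null) {
        return frozen; // length recorded when the snapshot was taken
      }
      // no diff recorded: the file is unchanged since that snapshot
    }
    return liveSize;
  }

  public static void main(String[] args) {
    FileSizeSketch f = new FileSizeSketch();
    f.diffSizes.put(3, 1024L);
    System.out.println(f.computeFileSize(3));                // 1024
    System.out.println(f.computeFileSize(CURRENT_STATE_ID)); // 4096
  }
}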
@ -607,9 +610,10 @@ public final long diskspaceConsumed() {
return computeFileSize(true, true) * getBlockReplication();
}
public final long diskspaceConsumed(Snapshot lastSnapshot) {
if (lastSnapshot != null) {
return computeFileSize(lastSnapshot) * getFileReplication(lastSnapshot);
public final long diskspaceConsumed(int lastSnapshotId) {
if (lastSnapshotId != CURRENT_STATE_ID) {
return computeFileSize(lastSnapshotId)
* getFileReplication(lastSnapshotId);
} else {
return diskspaceConsumed();
}
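
diskspaceConsumed(int) is length times replication, with both factors read against the same snapshot id so the two stay consistent. As a worked example, a file that was 1024 bytes with replication 3 at a given snapshot is charged 3072 bytes, whatever its current size:

// Worked example of the size-times-replication accounting above;
// the values are illustrative.
public class DiskspaceSketch {
  public static void main(String[] args) {
    long sizeAtSnapshot = 1024; // stands in for computeFileSize(sid)
    short replication = 3;      // stands in for getFileReplication(sid)
    System.out.println(sizeAtSnapshot * replication); // 3072 bytes charged
  }
}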
@ -638,9 +642,9 @@ public int numBlocks() {
@VisibleForTesting
@Override
public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
final Snapshot snapshot) {
super.dumpTreeRecursively(out, prefix, snapshot);
out.print(", fileSize=" + computeFileSize(snapshot));
final int snapshotId) {
super.dumpTreeRecursively(out, prefix, snapshotId);
out.print(", fileSize=" + computeFileSize(snapshotId));
// only compare the first block
out.print(", blocks=");
out.print(blocks == null || blocks.length == 0? null: blocks[0]);

View File

@ -23,7 +23,6 @@
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.Quota.Counts;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.util.GSet;
import org.apache.hadoop.util.LightWeightGSet;
@ -89,7 +88,8 @@ public INode get(long id) {
"", "", new FsPermission((short) 0)), 0, 0) {
@Override
INode recordModification(Snapshot latest) throws QuotaExceededException {
INode recordModification(int latestSnapshotId)
throws QuotaExceededException {
return null;
}
@ -112,7 +112,7 @@ public ContentSummaryComputationContext computeContentSummary(
}
@Override
public Counts cleanSubtree(Snapshot snapshot, Snapshot prior,
public Counts cleanSubtree(int snapshotId, int priorSnapshotId,
BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes,
boolean countDiffChange) throws QuotaExceededException {
return null;

View File

@ -91,7 +91,7 @@ private static int removeReference(INodeReference ref) {
* method to identify the snapshot which is the latest snapshot before the
* reference node's creation.
*/
static Snapshot getPriorSnapshot(INodeReference ref) {
static int getPriorSnapshot(INodeReference ref) {
WithCount wc = (WithCount) ref.getReferredINode();
WithName wn = null;
if (ref instanceof DstReference) {
@ -111,7 +111,7 @@ static Snapshot getPriorSnapshot(INodeReference ref) {
}
}
}
return null;
return Snapshot.NO_SNAPSHOT_ID;
}
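
Two sentinels now stand in for the old null checks: Snapshot.CURRENT_STATE_ID means "the live tree" and Snapshot.NO_SNAPSHOT_ID means "no such snapshot exists", which is what getPriorSnapshot above returns when no prior is found. A tiny sketch of the three-way branch callers perform (the numeric values are hypothetical; only the distinction matters):

// Sketch of branching on the two snapshot-id sentinels; the numeric
// values here are hypothetical.
public class SentinelSketch {
  static final int CURRENT_STATE_ID = Integer.MAX_VALUE;
  static final int NO_SNAPSHOT_ID = -1;

  static String describe(int sid) {
    if (sid == CURRENT_STATE_ID) return "read the live tree";
    if (sid == NO_SNAPSHOT_ID)   return "no prior snapshot";
    return "read snapshot #" + sid;
  }

  public static void main(String[] args) {
    System.out.println(describe(NO_SNAPSHOT_ID)); // no prior snapshot
    System.out.println(describe(7));              // read snapshot #7
  }
}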
private INode referred;
@ -185,13 +185,13 @@ public final long getId() {
}
@Override
public final PermissionStatus getPermissionStatus(Snapshot snapshot) {
return referred.getPermissionStatus(snapshot);
public final PermissionStatus getPermissionStatus(int snapshotId) {
return referred.getPermissionStatus(snapshotId);
}
@Override
public final String getUserName(Snapshot snapshot) {
return referred.getUserName(snapshot);
public final String getUserName(int snapshotId) {
return referred.getUserName(snapshotId);
}
@Override
@ -200,8 +200,8 @@ final void setUser(String user) {
}
@Override
public final String getGroupName(Snapshot snapshot) {
return referred.getGroupName(snapshot);
public final String getGroupName(int snapshotId) {
return referred.getGroupName(snapshotId);
}
@Override
@ -210,8 +210,8 @@ final void setGroup(String group) {
}
@Override
public final FsPermission getFsPermission(Snapshot snapshot) {
return referred.getFsPermission(snapshot);
public final FsPermission getFsPermission(int snapshotId) {
return referred.getFsPermission(snapshotId);
}
@Override
public final short getFsPermissionShort() {
@ -229,14 +229,14 @@ public long getPermissionLong() {
}
@Override
public final long getModificationTime(Snapshot snapshot) {
return referred.getModificationTime(snapshot);
public final long getModificationTime(int snapshotId) {
return referred.getModificationTime(snapshotId);
}
@Override
public final INode updateModificationTime(long mtime, Snapshot latest)
public final INode updateModificationTime(long mtime, int latestSnapshotId)
throws QuotaExceededException {
return referred.updateModificationTime(mtime, latest);
return referred.updateModificationTime(mtime, latestSnapshotId);
}
@Override
@ -245,8 +245,8 @@ public final void setModificationTime(long modificationTime) {
}
@Override
public final long getAccessTime(Snapshot snapshot) {
return referred.getAccessTime(snapshot);
public final long getAccessTime(int snapshotId) {
return referred.getAccessTime(snapshotId);
}
@Override
@ -255,15 +255,15 @@ public final void setAccessTime(long accessTime) {
}
@Override
final INode recordModification(Snapshot latest)
final INode recordModification(int latestSnapshotId)
throws QuotaExceededException {
referred.recordModification(latest);
referred.recordModification(latestSnapshotId);
// reference is never replaced
return this;
}
@Override // used by WithCount
public Quota.Counts cleanSubtree(Snapshot snapshot, Snapshot prior,
public Quota.Counts cleanSubtree(int snapshot, int prior,
BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes,
final boolean countDiffChange) throws QuotaExceededException {
return referred.cleanSubtree(snapshot, prior, collectedBlocks,
@ -291,8 +291,8 @@ public Quota.Counts computeQuotaUsage(Quota.Counts counts, boolean useCache,
}
@Override
public final INodeAttributes getSnapshotINode(Snapshot snapshot) {
return referred.getSnapshotINode(snapshot);
public final INodeAttributes getSnapshotINode(int snapshotId) {
return referred.getSnapshotINode(snapshotId);
}
@Override
@ -308,7 +308,7 @@ public final void clear() {
@Override
public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
final Snapshot snapshot) {
final int snapshot) {
super.dumpTreeRecursively(out, prefix, snapshot);
if (this instanceof DstReference) {
out.print(", dstSnapshotId=" + ((DstReference) this).dstSnapshotId);
@ -327,7 +327,7 @@ public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
}
public int getDstSnapshotId() {
return Snapshot.INVALID_ID;
return Snapshot.CURRENT_STATE_ID;
}
/** An anonymous reference with reference count. */
@ -457,34 +457,35 @@ public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
// node happened before the rename of its ancestor. This should be
// impossible since for WithName node we only count its children at the
// time of the rename.
Preconditions.checkState(this.lastSnapshotId >= lastSnapshotId);
Preconditions.checkState(lastSnapshotId == Snapshot.CURRENT_STATE_ID
|| this.lastSnapshotId >= lastSnapshotId);
final INode referred = this.getReferredINode().asReference()
.getReferredINode();
// We will continue the quota usage computation using the same snapshot id
// as time line (if the given snapshot id is valid). Also, we cannot use
// cache for the referred node since its cached quota may have already
// been updated by changes in the current tree.
int id = lastSnapshotId > Snapshot.INVALID_ID ?
int id = lastSnapshotId != Snapshot.CURRENT_STATE_ID ?
lastSnapshotId : this.lastSnapshotId;
return referred.computeQuotaUsage(counts, false, id);
}
@Override
public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
public Quota.Counts cleanSubtree(final int snapshot, int prior,
final BlocksMapUpdateInfo collectedBlocks,
final List<INode> removedINodes, final boolean countDiffChange)
throws QuotaExceededException {
// since WithName node resides in deleted list acting as a snapshot copy,
// the parameter snapshot must not be Snapshot.CURRENT_STATE_ID
Preconditions.checkArgument(snapshot != null);
// if prior is null, we need to check snapshot belonging to the previous
// WithName instance
if (prior == null) {
Preconditions.checkArgument(snapshot != Snapshot.CURRENT_STATE_ID);
// if prior is NO_SNAPSHOT_ID, we need to check snapshot belonging to the
// previous WithName instance
if (prior == Snapshot.NO_SNAPSHOT_ID) {
prior = getPriorSnapshot(this);
}
if (prior != null
&& Snapshot.ID_COMPARATOR.compare(snapshot, prior) <= 0) {
if (prior != Snapshot.NO_SNAPSHOT_ID
&& Snapshot.ID_INTEGER_COMPARATOR.compare(snapshot, prior) <= 0) {
return Quota.Counts.newInstance();
}
@ -496,7 +497,7 @@ public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
-counts.get(Quota.DISKSPACE), true);
}
if (snapshot.getId() < lastSnapshotId) {
if (snapshot < lastSnapshotId) {
// for a WithName node, when we compute its quota usage, we only count
// in all the nodes existing at the time of the corresponding rename op.
// Thus if we are deleting a snapshot before/at the snapshot associated
@ -509,16 +510,16 @@ public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
@Override
public void destroyAndCollectBlocks(BlocksMapUpdateInfo collectedBlocks,
final List<INode> removedINodes) {
Snapshot snapshot = getSelfSnapshot();
int snapshot = getSelfSnapshot();
if (removeReference(this) <= 0) {
getReferredINode().destroyAndCollectBlocks(collectedBlocks,
removedINodes);
} else {
Snapshot prior = getPriorSnapshot(this);
int prior = getPriorSnapshot(this);
INode referred = getReferredINode().asReference().getReferredINode();
if (snapshot != null) {
if (prior != null && snapshot.getId() <= prior.getId()) {
if (snapshot != Snapshot.NO_SNAPSHOT_ID) {
if (prior != Snapshot.NO_SNAPSHOT_ID && snapshot <= prior) {
// the snapshot to be deleted has been deleted while traversing
// the src tree of the previous rename operation. This usually
// happens when rename's src and dst are under the same
@ -545,9 +546,9 @@ public void destroyAndCollectBlocks(BlocksMapUpdateInfo collectedBlocks,
}
}
private Snapshot getSelfSnapshot() {
private int getSelfSnapshot() {
INode referred = getReferredINode().asReference().getReferredINode();
Snapshot snapshot = null;
int snapshot = Snapshot.NO_SNAPSHOT_ID;
if (referred.isFile() && referred.asFile().isWithSnapshot()) {
snapshot = referred.asFile().getDiffs().getPrior(lastSnapshotId);
} else if (referred.isDirectory()) {
@ -569,7 +570,7 @@ public static class DstReference extends INodeReference {
* latest snapshot. Otherwise changes will be recorded to the snapshot
* belonging to the src of the rename.
*
* {@link Snapshot#INVALID_ID} means no dstSnapshot (e.g., src of the
* {@link Snapshot#NO_SNAPSHOT_ID} means no dstSnapshot (e.g., src of the
* first-time rename).
*/
private final int dstSnapshotId;
@ -587,25 +588,27 @@ public DstReference(INodeDirectory parent, WithCount referred,
}
@Override
public Quota.Counts cleanSubtree(Snapshot snapshot, Snapshot prior,
public Quota.Counts cleanSubtree(int snapshot, int prior,
BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes,
final boolean countDiffChange) throws QuotaExceededException {
if (snapshot == null && prior == null) {
if (snapshot == Snapshot.CURRENT_STATE_ID
&& prior == Snapshot.NO_SNAPSHOT_ID) {
Quota.Counts counts = Quota.Counts.newInstance();
this.computeQuotaUsage(counts, true);
destroyAndCollectBlocks(collectedBlocks, removedINodes);
return counts;
} else {
// if prior is null, we need to check snapshot belonging to the previous
// WithName instance
if (prior == null) {
// if prior is NO_SNAPSHOT_ID, we need to check snapshot belonging to
// the previous WithName instance
if (prior == Snapshot.NO_SNAPSHOT_ID) {
prior = getPriorSnapshot(this);
}
// if prior is not null, and prior is not before the to-be-deleted
// snapshot, we can quit here and leave the snapshot deletion work to
// the src tree of rename
if (snapshot != null && prior != null
&& Snapshot.ID_COMPARATOR.compare(snapshot, prior) <= 0) {
// if prior is not NO_SNAPSHOT_ID, and prior is not before the
// to-be-deleted snapshot, we can quit here and leave the snapshot
// deletion work to the src tree of rename
if (snapshot != Snapshot.CURRENT_STATE_ID
&& prior != Snapshot.NO_SNAPSHOT_ID
&& Snapshot.ID_INTEGER_COMPARATOR.compare(snapshot, prior) <= 0) {
return Quota.Counts.newInstance();
}
return getReferredINode().cleanSubtree(snapshot, prior,
@ -632,12 +635,12 @@ public void destroyAndCollectBlocks(
} else {
// we will clean everything, including files, directories, and
// snapshots, that were created after this prior snapshot
Snapshot prior = getPriorSnapshot(this);
int prior = getPriorSnapshot(this);
// prior must not be NO_SNAPSHOT_ID, otherwise we do not have any previous
// WithName nodes, and the reference number will be 0.
Preconditions.checkState(prior != null);
Preconditions.checkState(prior != Snapshot.NO_SNAPSHOT_ID);
// identify the snapshot created after prior
Snapshot snapshot = getSelfSnapshot(prior);
int snapshot = getSelfSnapshot(prior);
INode referred = getReferredINode().asReference().getReferredINode();
if (referred.isFile()) {
@ -671,23 +674,23 @@ public void destroyAndCollectBlocks(
}
}
private Snapshot getSelfSnapshot(final Snapshot prior) {
private int getSelfSnapshot(final int prior) {
WithCount wc = (WithCount) getReferredINode().asReference();
INode referred = wc.getReferredINode();
Snapshot lastSnapshot = null;
int lastSnapshot = Snapshot.CURRENT_STATE_ID;
if (referred.isFile() && referred.asFile().isWithSnapshot()) {
lastSnapshot = referred.asFile().getDiffs().getLastSnapshot();
lastSnapshot = referred.asFile().getDiffs().getLastSnapshotId();
} else if (referred.isDirectory()) {
DirectoryWithSnapshotFeature sf = referred.asDirectory()
.getDirectoryWithSnapshotFeature();
if (sf != null) {
lastSnapshot = sf.getLastSnapshot();
lastSnapshot = sf.getLastSnapshotId();
}
}
if (lastSnapshot != null && !lastSnapshot.equals(prior)) {
if (lastSnapshot != Snapshot.CURRENT_STATE_ID && lastSnapshot != prior) {
return lastSnapshot;
} else {
return null;
return Snapshot.CURRENT_STATE_ID;
}
}
}

View File

@ -45,10 +45,10 @@ public class INodeSymlink extends INodeWithAdditionalFields {
}
@Override
INode recordModification(Snapshot latest) throws QuotaExceededException {
if (isInLatestSnapshot(latest)) {
INode recordModification(int latestSnapshotId) throws QuotaExceededException {
if (isInLatestSnapshot(latestSnapshotId)) {
INodeDirectory parent = getParent();
parent.saveChild2Snapshot(this, latest, new INodeSymlink(this));
parent.saveChild2Snapshot(this, latestSnapshotId, new INodeSymlink(this));
}
return this;
}
@ -74,10 +74,11 @@ public byte[] getSymlink() {
}
@Override
public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
public Quota.Counts cleanSubtree(final int snapshotId, int priorSnapshotId,
final BlocksMapUpdateInfo collectedBlocks,
final List<INode> removedINodes, final boolean countDiffChange) {
if (snapshot == null && prior == null) {
if (snapshotId == Snapshot.CURRENT_STATE_ID
&& priorSnapshotId == Snapshot.NO_SNAPSHOT_ID) {
destroyAndCollectBlocks(collectedBlocks, removedINodes);
}
return Quota.Counts.newInstance(1, 0);
@ -105,7 +106,7 @@ public ContentSummaryComputationContext computeContentSummary(
@Override
public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
final Snapshot snapshot) {
final int snapshot) {
super.dumpTreeRecursively(out, prefix, snapshot);
out.println();
}

View File

@ -155,9 +155,9 @@ final void clonePermissionStatus(INodeWithAdditionalFields that) {
}
@Override
final PermissionStatus getPermissionStatus(Snapshot snapshot) {
return new PermissionStatus(getUserName(snapshot), getGroupName(snapshot),
getFsPermission(snapshot));
final PermissionStatus getPermissionStatus(int snapshotId) {
return new PermissionStatus(getUserName(snapshotId), getGroupName(snapshotId),
getFsPermission(snapshotId));
}
private final void updatePermissionStatus(PermissionStatusFormat f, long n) {
@ -165,9 +165,9 @@ private final void updatePermissionStatus(PermissionStatusFormat f, long n) {
}
@Override
final String getUserName(Snapshot snapshot) {
if (snapshot != null) {
return getSnapshotINode(snapshot).getUserName();
final String getUserName(int snapshotId) {
if (snapshotId != Snapshot.CURRENT_STATE_ID) {
return getSnapshotINode(snapshotId).getUserName();
}
int n = (int)PermissionStatusFormat.USER.retrieve(permission);
@ -181,9 +181,9 @@ final void setUser(String user) {
}
@Override
final String getGroupName(Snapshot snapshot) {
if (snapshot != null) {
return getSnapshotINode(snapshot).getGroupName();
final String getGroupName(int snapshotId) {
if (snapshotId != Snapshot.CURRENT_STATE_ID) {
return getSnapshotINode(snapshotId).getGroupName();
}
int n = (int)PermissionStatusFormat.GROUP.retrieve(permission);
@ -197,9 +197,9 @@ final void setGroup(String group) {
}
@Override
final FsPermission getFsPermission(Snapshot snapshot) {
if (snapshot != null) {
return getSnapshotINode(snapshot).getFsPermission();
final FsPermission getFsPermission(int snapshotId) {
if (snapshotId != Snapshot.CURRENT_STATE_ID) {
return getSnapshotINode(snapshotId).getFsPermission();
}
return new FsPermission(getFsPermissionShort());
@ -221,9 +221,9 @@ public long getPermissionLong() {
}
@Override
final long getModificationTime(Snapshot snapshot) {
if (snapshot != null) {
return getSnapshotINode(snapshot).getModificationTime();
final long getModificationTime(int snapshotId) {
if (snapshotId != Snapshot.CURRENT_STATE_ID) {
return getSnapshotINode(snapshotId).getModificationTime();
}
return this.modificationTime;
@ -232,13 +232,13 @@ final long getModificationTime(Snapshot snapshot) {
/** Update modification time if it is larger than the current value. */
@Override
public final INode updateModificationTime(long mtime, Snapshot latest)
public final INode updateModificationTime(long mtime, int latestSnapshotId)
throws QuotaExceededException {
Preconditions.checkState(isDirectory());
if (mtime <= modificationTime) {
return this;
}
return setModificationTime(mtime, latest);
return setModificationTime(mtime, latestSnapshotId);
}
final void cloneModificationTime(INodeWithAdditionalFields that) {
@ -251,11 +251,10 @@ public final void setModificationTime(long modificationTime) {
}
@Override
final long getAccessTime(Snapshot snapshot) {
if (snapshot != null) {
return getSnapshotINode(snapshot).getAccessTime();
final long getAccessTime(int snapshotId) {
if (snapshotId != Snapshot.CURRENT_STATE_ID) {
return getSnapshotINode(snapshotId).getAccessTime();
}
return accessTime;
}

View File

@ -135,8 +135,8 @@ static INodesInPath resolve(final INodeDirectory startingDir,
if (!isRef && isDir && dir.isWithSnapshot()) {
// if the path is a non-snapshot path, update the latest snapshot.
if (!existing.isSnapshot()) {
existing.updateLatestSnapshot(dir.getDirectoryWithSnapshotFeature()
.getLastSnapshot());
existing.updateLatestSnapshotId(dir.getDirectoryWithSnapshotFeature()
.getLastSnapshotId());
}
} else if (isRef && isDir && !lastComp) {
// If the curNode is a reference node, need to check its dstSnapshot:
@ -151,16 +151,17 @@ static INodesInPath resolve(final INodeDirectory startingDir,
// recordModification method.
if (!existing.isSnapshot()) {
int dstSnapshotId = curNode.asReference().getDstSnapshotId();
Snapshot latest = existing.getLatestSnapshot();
if (latest == null || // no snapshot in dst tree of rename
dstSnapshotId >= latest.getId()) { // the above scenario
Snapshot lastSnapshot = null;
int latest = existing.getLatestSnapshotId();
if (latest == Snapshot.CURRENT_STATE_ID || // no snapshot in dst tree of rename
(dstSnapshotId != Snapshot.CURRENT_STATE_ID &&
dstSnapshotId >= latest)) { // the above scenario
int lastSnapshot = Snapshot.CURRENT_STATE_ID;
DirectoryWithSnapshotFeature sf = null;
if (curNode.isDirectory() &&
(sf = curNode.asDirectory().getDirectoryWithSnapshotFeature()) != null) {
lastSnapshot = sf.getLastSnapshot();
lastSnapshot = sf.getLastSnapshotId();
}
existing.setSnapshot(lastSnapshot);
existing.setSnapshotId(lastSnapshot);
}
}
}
@ -206,14 +207,14 @@ static INodesInPath resolve(final INodeDirectory startingDir,
curNode = null;
} else {
curNode = s.getRoot();
existing.setSnapshot(s);
existing.setSnapshotId(s.getId());
}
if (index >= -1) {
existing.snapshotRootIndex = existing.numNonNull;
}
} else {
// normal case, and also for resolving file/dir under snapshot root
curNode = dir.getChild(childName, existing.getPathSnapshot());
curNode = dir.getChild(childName, existing.getPathSnapshotId());
}
count++;
index++;
@ -245,11 +246,12 @@ static INodesInPath resolve(final INodeDirectory startingDir,
*/
private int snapshotRootIndex;
/**
* For snapshot paths, it is the reference to the snapshot; or null if the
* snapshot does not exist. For non-snapshot paths, it is the reference to
* the latest snapshot found in the path; or null if no snapshot is found.
* For snapshot paths, it is the id of the snapshot; or
* {@link Snapshot#CURRENT_STATE_ID} if the snapshot does not exist. For
* non-snapshot paths, it is the id of the latest snapshot found in the path;
* or {@link Snapshot#CURRENT_STATE_ID} if no snapshot is found.
*/
private Snapshot snapshot = null;
private int snapshotId = Snapshot.CURRENT_STATE_ID;
private INodesInPath(byte[][] path, int number) {
this.path = path;
@ -262,29 +264,30 @@ private INodesInPath(byte[][] path, int number) {
}
/**
* For non-snapshot paths, return the latest snapshot found in the path.
* For snapshot paths, return null.
* For non-snapshot paths, return the latest snapshot id found in the path.
*/
public Snapshot getLatestSnapshot() {
return isSnapshot? null: snapshot;
public int getLatestSnapshotId() {
Preconditions.checkState(!isSnapshot);
return snapshotId;
}
/**
* For snapshot paths, return the snapshot specified in the path.
* For non-snapshot paths, return null.
* For snapshot paths, return the id of the snapshot specified in the path.
* For non-snapshot paths, return {@link Snapshot#CURRENT_STATE_ID}.
*/
public Snapshot getPathSnapshot() {
return isSnapshot? snapshot: null;
public int getPathSnapshotId() {
return isSnapshot ? snapshotId : Snapshot.CURRENT_STATE_ID;
}
private void setSnapshot(Snapshot s) {
snapshot = s;
private void setSnapshotId(int sid) {
snapshotId = sid;
}
private void updateLatestSnapshot(Snapshot s) {
if (snapshot == null
|| (s != null && Snapshot.ID_COMPARATOR.compare(snapshot, s) < 0)) {
snapshot = s;
private void updateLatestSnapshotId(int sid) {
if (snapshotId == Snapshot.CURRENT_STATE_ID
|| (sid != Snapshot.CURRENT_STATE_ID && Snapshot.ID_INTEGER_COMPARATOR
.compare(snapshotId, sid) < 0)) {
snapshotId = sid;
}
}
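
updateLatestSnapshotId above keeps the largest snapshot id seen along the path, with CURRENT_STATE_ID acting as "unset". Stripped of the comparator, it is a guarded max; this sketch uses a plain > where the real code goes through Snapshot.ID_INTEGER_COMPARATOR:

// updateLatestSnapshotId reduced to a guarded max; the sentinel value
// is hypothetical and plain > replaces ID_INTEGER_COMPARATOR.
public class LatestIdSketch {
  static final int CURRENT_STATE_ID = Integer.MAX_VALUE;
  private int snapshotId = CURRENT_STATE_ID; // unset

  void update(int sid) {
    if (snapshotId == CURRENT_STATE_ID
        || (sid != CURRENT_STATE_ID && sid > snapshotId)) {
      snapshotId = sid; // keep the larger id
    }
  }

  public static void main(String[] args) {
    LatestIdSketch p = new LatestIdSketch();
    p.update(3); p.update(9); p.update(5);
    System.out.println(p.snapshotId); // 9
  }
}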
@ -386,7 +389,7 @@ private String toString(boolean vaildateObject) {
.append("\n capacity = ").append(capacity)
.append("\n isSnapshot = ").append(isSnapshot)
.append("\n snapshotRootIndex = ").append(snapshotRootIndex)
.append("\n snapshot = ").append(snapshot);
.append("\n snapshotId = ").append(snapshotId);
return b.toString();
}

View File

@ -22,8 +22,8 @@
import java.util.List;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;
import org.apache.hadoop.hdfs.server.namenode.Quota;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
@ -52,8 +52,8 @@ abstract class AbstractINodeDiff<N extends INode,
D extends AbstractINodeDiff<N, A, D>>
implements Comparable<Integer> {
/** The snapshot will be obtained after this diff is applied. */
Snapshot snapshot;
/** The id of the corresponding snapshot. */
private int snapshotId;
/** The snapshot inode data. It is null when there is no change. */
A snapshotINode;
/**
@ -64,10 +64,8 @@ abstract class AbstractINodeDiff<N extends INode,
*/
private D posteriorDiff;
AbstractINodeDiff(Snapshot snapshot, A snapshotINode, D posteriorDiff) {
Preconditions.checkNotNull(snapshot, "snapshot is null");
this.snapshot = snapshot;
AbstractINodeDiff(int snapshotId, A snapshotINode, D posteriorDiff) {
this.snapshotId = snapshotId;
this.snapshotINode = snapshotINode;
this.posteriorDiff = posteriorDiff;
}
@ -75,16 +73,16 @@ abstract class AbstractINodeDiff<N extends INode,
/** Compare diffs with snapshot ID. */
@Override
public final int compareTo(final Integer that) {
return Snapshot.ID_INTEGER_COMPARATOR.compare(this.snapshot.getId(), that);
return Snapshot.ID_INTEGER_COMPARATOR.compare(this.snapshotId, that);
}
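
Implementing Comparable<Integer> rather than Comparable<AbstractINodeDiff> is what lets the diff list be binary-searched directly by snapshot id, as deleteSnapshotDiff and getDiffById below do. A standalone sketch of that element-versus-key search:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Sketch of searching a diff list by snapshot id: the elements are
// Comparable<Integer>, so the key can be a bare id.
public class DiffSearchSketch {
  static class Diff implements Comparable<Integer> {
    final int snapshotId;
    Diff(int id) { this.snapshotId = id; }
    @Override public int compareTo(Integer that) {
      return Integer.compare(snapshotId, that);
    }
  }

  public static void main(String[] args) {
    List<Diff> diffs = Arrays.asList(new Diff(2), new Diff(5), new Diff(9));
    System.out.println(Collections.binarySearch(diffs, 5)); // 1 (found)
    System.out.println(Collections.binarySearch(diffs, 4)); // -2 (miss)
  }
}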
/** @return the id of the snapshot corresponding to this diff. */
public final Snapshot getSnapshot() {
return snapshot;
public final int getSnapshotId() {
return snapshotId;
}
final void setSnapshot(Snapshot snapshot) {
this.snapshot = snapshot;
final void setSnapshotId(int snapshot) {
this.snapshotId = snapshot;
}
/** @return the posterior diff. */
@ -132,13 +130,12 @@ abstract Quota.Counts destroyDiffAndCollectBlocks(final N currentINode,
@Override
public String toString() {
return getClass().getSimpleName() + ": " + snapshot + " (post="
+ (posteriorDiff == null? null: posteriorDiff.snapshot) + ")";
return getClass().getSimpleName() + ": " + this.getSnapshotId() + " (post="
+ (posteriorDiff == null? null: posteriorDiff.getSnapshotId()) + ")";
}
void writeSnapshot(DataOutput out) throws IOException {
// Assume the snapshot has been recorded before; write only its id.
out.writeInt(snapshot.getId());
out.writeInt(snapshotId);
}
abstract void write(DataOutput out, ReferenceMap referenceMap

View File

@ -53,7 +53,7 @@ public void clear() {
}
/** @return an {@link AbstractINodeDiff}. */
abstract D createDiff(Snapshot snapshot, N currentINode);
abstract D createDiff(int snapshotId, N currentINode);
/** @return a snapshot copy of the current inode. */
abstract A createSnapshotCopy(N currentINode);
@ -63,25 +63,25 @@ public void clear() {
* outside. If the diff to remove is not the first one in the diff list, we
* need to combine the diff with its previous one.
*
* @param snapshot The snapshot to be deleted
* @param prior The snapshot taken before the to-be-deleted snapshot
* @param snapshot The id of the snapshot to be deleted
* @param prior The id of the snapshot taken before the to-be-deleted snapshot
* @param collectedBlocks Used to collect information for blocksMap update
* @return delta in namespace.
*/
public final Quota.Counts deleteSnapshotDiff(final Snapshot snapshot,
Snapshot prior, final N currentINode,
public final Quota.Counts deleteSnapshotDiff(final int snapshot,
final int prior, final N currentINode,
final BlocksMapUpdateInfo collectedBlocks,
final List<INode> removedINodes, boolean countDiffChange)
throws QuotaExceededException {
int snapshotIndex = Collections.binarySearch(diffs, snapshot.getId());
int snapshotIndex = Collections.binarySearch(diffs, snapshot);
Quota.Counts counts = Quota.Counts.newInstance();
D removed = null;
if (snapshotIndex == 0) {
if (prior != null) {
if (prior != Snapshot.NO_SNAPSHOT_ID) { // there is still a snapshot before
// set the snapshot to latestBefore
diffs.get(snapshotIndex).setSnapshot(prior);
} else {
diffs.get(snapshotIndex).setSnapshotId(prior);
} else { // there is no snapshot before
removed = diffs.remove(0);
if (countDiffChange) {
counts.add(Quota.NAMESPACE, 1);
@ -96,8 +96,8 @@ public final Quota.Counts deleteSnapshotDiff(final Snapshot snapshot,
}
} else if (snapshotIndex > 0) {
final AbstractINodeDiff<N, A, D> previous = diffs.get(snapshotIndex - 1);
if (!previous.getSnapshot().equals(prior)) {
diffs.get(snapshotIndex).setSnapshot(prior);
if (previous.getSnapshotId() != prior) {
diffs.get(snapshotIndex).setSnapshotId(prior);
} else {
// combine the to-be-removed diff with its previous diff
removed = diffs.remove(snapshotIndex);
@ -120,10 +120,10 @@ public final Quota.Counts deleteSnapshotDiff(final Snapshot snapshot,
}
/** Add an {@link AbstractINodeDiff} for the given snapshot. */
final D addDiff(Snapshot latest, N currentINode)
final D addDiff(int latestSnapshotId, N currentINode)
throws QuotaExceededException {
currentINode.addSpaceConsumed(1, 0, true);
return addLast(createDiff(latest, currentINode));
return addLast(createDiff(latestSnapshotId, currentINode));
}
/** Append the diff at the end of the list. */
@ -149,10 +149,10 @@ public final D getLast() {
return n == 0? null: diffs.get(n - 1);
}
/** @return the last snapshot. */
public final Snapshot getLastSnapshot() {
/** @return the id of the last snapshot. */
public final int getLastSnapshotId() {
final AbstractINodeDiff<N, A, D> last = getLast();
return last == null? null: last.getSnapshot();
return last == null ? Snapshot.CURRENT_STATE_ID : last.getSnapshotId();
}
/**
@ -161,60 +161,49 @@ public final Snapshot getLastSnapshot() {
* snapshot id.
* @param exclusive True means the returned snapshot's id must be < the given
* id, otherwise <=.
* @return The latest snapshot before the given snapshot.
* @return The id of the latest snapshot before the given snapshot.
*/
private final Snapshot getPrior(int anchorId, boolean exclusive) {
if (anchorId == Snapshot.INVALID_ID) {
return getLastSnapshot();
private final int getPrior(int anchorId, boolean exclusive) {
if (anchorId == Snapshot.CURRENT_STATE_ID) {
return getLastSnapshotId();
}
final int i = Collections.binarySearch(diffs, anchorId);
if (exclusive) { // must be the one before
if (i == -1 || i == 0) {
return null;
return Snapshot.NO_SNAPSHOT_ID;
} else {
int priorIndex = i > 0 ? i - 1 : -i - 2;
return diffs.get(priorIndex).getSnapshot();
return diffs.get(priorIndex).getSnapshotId();
}
} else { // the one, or the one before if not existing
if (i >= 0) {
return diffs.get(i).getSnapshot();
return diffs.get(i).getSnapshotId();
} else if (i < -1) {
return diffs.get(-i - 2).getSnapshot();
return diffs.get(-i - 2).getSnapshotId();
} else { // i == -1
return null;
return Snapshot.NO_SNAPSHOT_ID;
}
}
}
public final Snapshot getPrior(int snapshotId) {
public final int getPrior(int snapshotId) {
return getPrior(snapshotId, false);
}
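
getPrior leans on how Collections.binarySearch encodes a miss: a negative return r means the insertion point is -r - 1, so -i - 2 is precisely the index of the last snapshot strictly below the anchor id. A quick standalone check of that arithmetic:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Demonstrates the -i - 2 arithmetic used by getPrior: for a miss,
// binarySearch returns -(insertionPoint) - 1, so -i - 2 indexes the
// greatest element strictly below the key.
public class PriorIndexSketch {
  public static void main(String[] args) {
    List<Integer> ids = Arrays.asList(2, 5, 9);
    int i = Collections.binarySearch(ids, 7); // miss between 5 and 9
    System.out.println(i);                    // -3 (insertion point 2)
    System.out.println(ids.get(-i - 2));      // 5, the prior snapshot id
  }
}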
/**
* Update the prior snapshot.
*/
final Snapshot updatePrior(Snapshot snapshot, Snapshot prior) {
int id = snapshot == null ? Snapshot.INVALID_ID : snapshot.getId();
Snapshot s = getPrior(id, true);
if (s != null &&
(prior == null || Snapshot.ID_COMPARATOR.compare(s, prior) > 0)) {
return s;
final int updatePrior(int snapshot, int prior) {
int p = getPrior(snapshot, true);
if (p != Snapshot.CURRENT_STATE_ID
&& Snapshot.ID_INTEGER_COMPARATOR.compare(p, prior) > 0) {
return p;
}
return prior;
}
/**
* @return the diff corresponding to the given snapshot.
* When the diff is null, it means that the current state and
* the corresponding snapshot state are the same.
*/
public final D getDiff(Snapshot snapshot) {
return getDiffById(snapshot == null ?
Snapshot.INVALID_ID : snapshot.getId());
}
private final D getDiffById(final int snapshotId) {
if (snapshotId == Snapshot.INVALID_ID) {
public final D getDiffById(final int snapshotId) {
if (snapshotId == Snapshot.CURRENT_STATE_ID) {
return null;
}
final int i = Collections.binarySearch(diffs, snapshotId);
@ -234,9 +223,9 @@ private final D getDiffById(final int snapshotId) {
* Search for the snapshot whose id is 1) no less than the given id,
* and 2) closest to the given id.
*/
public final Snapshot getSnapshotById(final int snapshotId) {
public final int getSnapshotById(final int snapshotId) {
D diff = getDiffById(snapshotId);
return diff == null ? null : diff.getSnapshot();
return diff == null ? Snapshot.CURRENT_STATE_ID : diff.getSnapshotId();
}
/**
@ -271,8 +260,8 @@ final boolean changedBetweenSnapshots(Snapshot earlier, Snapshot later) {
* Note that the current inode is returned if there is no change
* between the given snapshot and the current state.
*/
public A getSnapshotINode(final Snapshot snapshot, final A currentINode) {
final D diff = getDiff(snapshot);
public A getSnapshotINode(final int snapshotId, final A currentINode) {
final D diff = getDiffById(snapshotId);
final A inode = diff == null? null: diff.getSnapshotINode();
return inode == null? currentINode: inode;
}
@ -281,15 +270,16 @@ public A getSnapshotINode(final Snapshot snapshot, final A currentINode) {
* Check if the latest snapshot diff exists. If not, add it.
* @return the latest snapshot diff, which is never null.
*/
final D checkAndAddLatestSnapshotDiff(Snapshot latest, N currentINode)
final D checkAndAddLatestSnapshotDiff(int latestSnapshotId, N currentINode)
throws QuotaExceededException {
final D last = getLast();
if (last != null
&& Snapshot.ID_COMPARATOR.compare(last.getSnapshot(), latest) >= 0) {
&& Snapshot.ID_INTEGER_COMPARATOR.compare(last.getSnapshotId(),
latestSnapshotId) >= 0) {
return last;
} else {
try {
return addDiff(latest, currentINode);
return addDiff(latestSnapshotId, currentINode);
} catch(NSQuotaExceededException e) {
e.setMessagePrefix("Failed to record modification for snapshot");
throw e;
@ -298,10 +288,10 @@ final D checkAndAddLatestSnapshotDiff(Snapshot latest, N currentINode)
}
/** Save the snapshot copy to the latest snapshot. */
public void saveSelf2Snapshot(Snapshot latest, N currentINode, A snapshotCopy)
throws QuotaExceededException {
if (latest != null) {
D diff = checkAndAddLatestSnapshotDiff(latest, currentINode);
public void saveSelf2Snapshot(int latestSnapshotId, N currentINode,
A snapshotCopy) throws QuotaExceededException {
if (latestSnapshotId != Snapshot.CURRENT_STATE_ID) {
D diff = checkAndAddLatestSnapshotDiff(latestSnapshotId, currentINode);
if (diff.snapshotINode == null) {
if (snapshotCopy == null) {
snapshotCopy = createSnapshotCopy(currentINode);

View File

@ -225,30 +225,36 @@ public static class DirectoryDiff extends
private final int childrenSize;
/** The children list diff. */
private final ChildrenDiff diff;
private boolean isSnapshotRoot = false;
private DirectoryDiff(int snapshotId, INodeDirectory dir) {
super(snapshotId, null, null);
private DirectoryDiff(Snapshot snapshot, INodeDirectory dir) {
super(snapshot, null, null);
this.childrenSize = dir.getChildrenList(null).size();
this.childrenSize = dir.getChildrenList(Snapshot.CURRENT_STATE_ID).size();
this.diff = new ChildrenDiff();
}
/** Constructor used by FSImage loading */
DirectoryDiff(Snapshot snapshot, INodeDirectoryAttributes snapshotINode,
DirectoryDiff posteriorDiff, int childrenSize,
List<INode> createdList, List<INode> deletedList) {
super(snapshot, snapshotINode, posteriorDiff);
DirectoryDiff(int snapshotId, INodeDirectoryAttributes snapshotINode,
DirectoryDiff posteriorDiff, int childrenSize, List<INode> createdList,
List<INode> deletedList, boolean isSnapshotRoot) {
super(snapshotId, snapshotINode, posteriorDiff);
this.childrenSize = childrenSize;
this.diff = new ChildrenDiff(createdList, deletedList);
this.isSnapshotRoot = isSnapshotRoot;
}
ChildrenDiff getChildrenDiff() {
return diff;
}
/** Is the inode the root of the snapshot? */
void setSnapshotRoot(INodeDirectoryAttributes root) {
this.snapshotINode = root;
this.isSnapshotRoot = true;
}
boolean isSnapshotRoot() {
return snapshotINode == snapshot.getRoot();
return isSnapshotRoot;
}
@Override
@ -287,7 +293,7 @@ private List<INode> initChildren() {
combined.combinePosterior(d.diff, null);
}
children = combined.apply2Current(ReadOnlyList.Util.asList(
currentDir.getChildrenList(null)));
currentDir.getChildrenList(Snapshot.CURRENT_STATE_ID)));
}
return children;
}
@ -327,7 +333,7 @@ INode getChild(byte[] name, boolean checkPosterior,
return null;
} else if (d.getPosterior() == null) {
// no more posterior diff, get from current inode.
return currentDir.getChild(name, null);
return currentDir.getChild(name, Snapshot.CURRENT_STATE_ID);
}
}
}
@ -342,11 +348,9 @@ void write(DataOutput out, ReferenceMap referenceMap) throws IOException {
writeSnapshot(out);
out.writeInt(childrenSize);
// write snapshotINode
if (isSnapshotRoot()) {
out.writeBoolean(true);
} else {
out.writeBoolean(false);
// Write snapshotINode
out.writeBoolean(isSnapshotRoot);
if (!isSnapshotRoot) {
if (snapshotINode != null) {
out.writeBoolean(true);
FSImageSerialization.writeINodeDirectoryAttributes(snapshotINode, out);
@ -373,7 +377,7 @@ public static class DirectoryDiffList
extends AbstractINodeDiffList<INodeDirectory, INodeDirectoryAttributes, DirectoryDiff> {
@Override
DirectoryDiff createDiff(Snapshot snapshot, INodeDirectory currentDir) {
DirectoryDiff createDiff(int snapshot, INodeDirectory currentDir) {
return new DirectoryDiff(snapshot, currentDir);
}
@ -424,12 +428,13 @@ private static Map<INode, INode> cloneDiffList(List<INode> diffList) {
/**
* Destroy a subtree under a DstReference node.
*/
public static void destroyDstSubtree(INode inode, final Snapshot snapshot,
final Snapshot prior, final BlocksMapUpdateInfo collectedBlocks,
public static void destroyDstSubtree(INode inode, final int snapshot,
final int prior, final BlocksMapUpdateInfo collectedBlocks,
final List<INode> removedINodes) throws QuotaExceededException {
Preconditions.checkArgument(prior != null);
Preconditions.checkArgument(prior != Snapshot.NO_SNAPSHOT_ID);
if (inode.isReference()) {
if (inode instanceof INodeReference.WithName && snapshot != null) {
if (inode instanceof INodeReference.WithName
&& snapshot != Snapshot.CURRENT_STATE_ID) {
// this inode has been renamed before the deletion of the DstReference
// subtree
inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes,
@ -447,18 +452,18 @@ public static void destroyDstSubtree(INode inode, final Snapshot snapshot,
DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
if (sf != null) {
DirectoryDiffList diffList = sf.getDiffs();
DirectoryDiff priorDiff = diffList.getDiff(prior);
if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
DirectoryDiff priorDiff = diffList.getDiffById(prior);
if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
excludedNodes = cloneDiffList(dList);
}
if (snapshot != null) {
if (snapshot != Snapshot.CURRENT_STATE_ID) {
diffList.deleteSnapshotDiff(snapshot, prior, dir, collectedBlocks,
removedINodes, true);
}
priorDiff = diffList.getDiff(prior);
if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
priorDiff = diffList.getDiffById(prior);
if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
priorDiff.diff.destroyCreatedList(dir, collectedBlocks,
removedINodes);
}
@ -478,14 +483,14 @@ public static void destroyDstSubtree(INode inode, final Snapshot snapshot,
* deleted list of prior.
* @param inode The inode to clean.
* @param post The id of the post snapshot.
* @param prior The prior snapshot.
* @param prior The id of the prior snapshot.
* @param collectedBlocks Used to collect blocks for later deletion.
* @return Quota usage update.
*/
private static Quota.Counts cleanDeletedINode(INode inode,
final Snapshot post, final Snapshot prior,
final int post, final int prior,
final BlocksMapUpdateInfo collectedBlocks,
final List<INode> removedINodes, final boolean countDiffChange)
throws QuotaExceededException {
Quota.Counts counts = Quota.Counts.newInstance();
Deque<INode> queue = new ArrayDeque<INode>();
@ -494,7 +499,7 @@ private static Quota.Counts cleanDeletedINode(INode inode,
INode topNode = queue.pollFirst();
if (topNode instanceof INodeReference.WithName) {
INodeReference.WithName wn = (INodeReference.WithName) topNode;
if (wn.getLastSnapshotId() >= post.getId()) {
if (wn.getLastSnapshotId() >= post) {
wn.cleanSubtree(post, prior, collectedBlocks, removedINodes,
countDiffChange);
}
@ -511,8 +516,8 @@ private static Quota.Counts cleanDeletedINode(INode inode,
if (sf != null) {
// delete files/dirs created after prior. Note that these
// files/dirs, along with inode, were deleted right after post.
DirectoryDiff priorDiff = sf.getDiffs().getDiff(prior);
if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
DirectoryDiff priorDiff = sf.getDiffs().getDiffById(prior);
if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
priorChildrenDiff = priorDiff.getChildrenDiff();
counts.add(priorChildrenDiff.destroyCreatedList(dir,
collectedBlocks, removedINodes));
@ -540,8 +545,8 @@ public DirectoryWithSnapshotFeature(DirectoryDiffList diffs) {
}
/** @return the last snapshot. */
public Snapshot getLastSnapshot() {
return diffs.getLastSnapshot();
public int getLastSnapshotId() {
return diffs.getLastSnapshotId();
}
/** @return the snapshot diff list. */
@ -565,11 +570,13 @@ public void getSnapshotDirectory(List<INodeDirectory> snapshotDir) {
* to make sure that parent is in the given snapshot "latest".
*/
public boolean addChild(INodeDirectory parent, INode inode,
boolean setModTime, Snapshot latest) throws QuotaExceededException {
ChildrenDiff diff = diffs.checkAndAddLatestSnapshotDiff(latest, parent).diff;
boolean setModTime, int latestSnapshotId) throws QuotaExceededException {
ChildrenDiff diff = diffs.checkAndAddLatestSnapshotDiff(latestSnapshotId,
parent).diff;
int undoInfo = diff.create(inode);
final boolean added = parent.addChild(inode, setModTime, null);
final boolean added = parent.addChild(inode, setModTime,
Snapshot.CURRENT_STATE_ID);
if (!added) {
diff.undoCreate(inode, undoInfo);
}
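addChild records the create in the latest snapshot diff before attempting the real insert, then rolls the diff entry back if the insert fails. A self-contained sketch of that record-then-undo pattern; ChildDiff is a stand-in, not the real ChildrenDiff API:

    import java.util.ArrayList;
    import java.util.List;

    class RecordThenUndo {
      static class ChildDiff {
        private final List<String> created = new ArrayList<String>();
        int create(String name) { created.add(name); return created.size() - 1; }
        void undoCreate(int undoInfo) { created.remove(undoInfo); }
      }

      static boolean addChild(ChildDiff diff, List<String> children, String child) {
        int undoInfo = diff.create(child);  // 1. record the create in the diff first
        boolean added = !children.contains(child) && children.add(child); // 2. real insert
        if (!added) {
          diff.undoCreate(undoInfo);        // 3. undo the diff entry on failure
        }
        return added;
      }
    }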
@ -581,7 +588,7 @@ public boolean addChild(INodeDirectory parent, INode inode,
* needs to make sure that parent is in the given snapshot "latest".
*/
public boolean removeChild(INodeDirectory parent, INode child,
Snapshot latest) throws QuotaExceededException {
int latestSnapshotId) throws QuotaExceededException {
// For a directory that is not a renamed node, if isInLatestSnapshot returns
// false, the directory is not in the latest snapshot, thus we do not need
// to record the removed child in any snapshot.
@ -593,7 +600,8 @@ public boolean removeChild(INodeDirectory parent, INode child,
// directory node cannot be in any snapshot (not in current tree, nor in
// previous src tree). Thus we do not need to record the removed child in
// any snapshot.
ChildrenDiff diff = diffs.checkAndAddLatestSnapshotDiff(latest, parent).diff;
ChildrenDiff diff = diffs.checkAndAddLatestSnapshotDiff(latestSnapshotId,
parent).diff;
UndoInfo<INode> undoInfo = diff.delete(child);
final boolean removed = parent.removeChild(child);
@ -611,29 +619,29 @@ public boolean removeChild(INodeDirectory parent, INode child,
* for the snapshot and return it.
*/
public ReadOnlyList<INode> getChildrenList(INodeDirectory currentINode,
final Snapshot snapshot) {
final DirectoryDiff diff = diffs.getDiff(snapshot);
final int snapshotId) {
final DirectoryDiff diff = diffs.getDiffById(snapshotId);
return diff != null ? diff.getChildrenList(currentINode) : currentINode
.getChildrenList(null);
.getChildrenList(Snapshot.CURRENT_STATE_ID);
}
public INode getChild(INodeDirectory currentINode, byte[] name,
Snapshot snapshot) {
final DirectoryDiff diff = diffs.getDiff(snapshot);
int snapshotId) {
final DirectoryDiff diff = diffs.getDiffById(snapshotId);
return diff != null ? diff.getChild(name, true, currentINode)
: currentINode.getChild(name, null);
: currentINode.getChild(name, Snapshot.CURRENT_STATE_ID);
}
/** Used to record the modification of a symlink node */
public INode saveChild2Snapshot(INodeDirectory currentINode,
final INode child, final Snapshot latest, final INode snapshotCopy)
final INode child, final int latestSnapshotId, final INode snapshotCopy)
throws QuotaExceededException {
Preconditions.checkArgument(!child.isDirectory(),
"child is a directory, child=%s", child);
Preconditions.checkArgument(latest != null);
Preconditions.checkArgument(latestSnapshotId != Snapshot.CURRENT_STATE_ID);
final DirectoryDiff diff = diffs.checkAndAddLatestSnapshotDiff(latest,
currentINode);
final DirectoryDiff diff = diffs.checkAndAddLatestSnapshotDiff(
latestSnapshotId, currentINode);
if (diff.getChild(child.getLocalNameBytes(), false, currentINode) != null) {
// it was already saved in the latest snapshot earlier.
return child;
@ -656,7 +664,7 @@ public void clear(INodeDirectory currentINode,
public Quota.Counts computeQuotaUsage4CurrentDirectory(Quota.Counts counts) {
for(DirectoryDiff d : diffs) {
for(INode deleted : d.getChildrenDiff().getList(ListType.DELETED)) {
deleted.computeQuotaUsage(counts, false, Snapshot.INVALID_ID);
deleted.computeQuotaUsage(counts, false, Snapshot.CURRENT_STATE_ID);
}
}
counts.add(Quota.NAMESPACE, diffs.asList().size());
@ -744,14 +752,14 @@ boolean computeDiffBetweenSnapshots(Snapshot fromSnapshot,
}
public Quota.Counts cleanDirectory(final INodeDirectory currentINode,
final Snapshot snapshot, Snapshot prior,
final int snapshot, int prior,
final BlocksMapUpdateInfo collectedBlocks,
final List<INode> removedINodes, final boolean countDiffChange)
throws QuotaExceededException {
Quota.Counts counts = Quota.Counts.newInstance();
Map<INode, INode> priorCreated = null;
Map<INode, INode> priorDeleted = null;
if (snapshot == null) { // delete the current directory
if (snapshot == Snapshot.CURRENT_STATE_ID) { // delete the current directory
currentINode.recordModification(prior);
// delete everything in created list
DirectoryDiff lastDiff = diffs.getLast();
@ -764,9 +772,9 @@ public Quota.Counts cleanDirectory(final INodeDirectory currentINode,
prior = getDiffs().updatePrior(snapshot, prior);
// if there is a snapshot diff associated with prior, we need to record
// its original created and deleted list before deleting post
if (prior != null) {
DirectoryDiff priorDiff = this.getDiffs().getDiff(prior);
if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
if (prior != Snapshot.NO_SNAPSHOT_ID) {
DirectoryDiff priorDiff = this.getDiffs().getDiffById(prior);
if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
List<INode> cList = priorDiff.diff.getList(ListType.CREATED);
List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
priorCreated = cloneDiffList(cList);
@ -774,13 +782,13 @@ public Quota.Counts cleanDirectory(final INodeDirectory currentINode,
}
}
counts.add(getDiffs().deleteSnapshotDiff(snapshot, prior, currentINode,
collectedBlocks, removedINodes, countDiffChange));
counts.add(getDiffs().deleteSnapshotDiff(snapshot, prior,
currentINode, collectedBlocks, removedINodes, countDiffChange));
// check priorDiff again since it may be created during the diff deletion
if (prior != null) {
DirectoryDiff priorDiff = this.getDiffs().getDiff(prior);
if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
if (prior != Snapshot.NO_SNAPSHOT_ID) {
DirectoryDiff priorDiff = this.getDiffs().getDiffById(prior);
if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
// For files/directories created between "prior" and "snapshot",
// we need to clear snapshot copies for "snapshot". Note that we must
// use Snapshot.NO_SNAPSHOT_ID as prior in the cleanSubtree call. Files/directories that
@ -791,8 +799,8 @@ public Quota.Counts cleanDirectory(final INodeDirectory currentINode,
for (INode cNode : priorDiff.getChildrenDiff().getList(
ListType.CREATED)) {
if (priorCreated.containsKey(cNode)) {
counts.add(cNode.cleanSubtree(snapshot, null, collectedBlocks,
removedINodes, countDiffChange));
counts.add(cNode.cleanSubtree(snapshot, Snapshot.NO_SNAPSHOT_ID,
collectedBlocks, removedINodes, countDiffChange));
}
}
}
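cleanDirectory now dispatches on sentinels instead of nulls: CURRENT_STATE_ID means the live directory itself is being removed, while a real id deletes one snapshot diff, merging into the prior snapshot when one exists. A compact sketch of that dispatch; the two constants are the real ones, the rest is illustrative:

    class CleanDirectorySketch {
      static final int CURRENT_STATE_ID = Integer.MAX_VALUE - 1; // Snapshot.CURRENT_STATE_ID
      static final int NO_SNAPSHOT_ID = -1;                      // Snapshot.NO_SNAPSHOT_ID

      static String deletionMode(int snapshotId, int priorId) {
        if (snapshotId == CURRENT_STATE_ID) {
          return "delete the current directory; record the change under prior";
        }
        return priorId == NO_SNAPSHOT_ID
            ? "delete the snapshot diff; nothing earlier to merge into"
            : "delete the snapshot diff; merge its changes into snapshot " + priorId;
      }
    }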


@ -38,15 +38,15 @@ public class FileDiff extends
/** The file size at snapshot creation time. */
private final long fileSize;
FileDiff(Snapshot snapshot, INodeFile file) {
super(snapshot, null, null);
FileDiff(int snapshotId, INodeFile file) {
super(snapshotId, null, null);
fileSize = file.computeFileSize();
}
/** Constructor used by FSImage loading */
FileDiff(Snapshot snapshot, INodeFileAttributes snapshotINode,
FileDiff(int snapshotId, INodeFileAttributes snapshotINode,
FileDiff posteriorDiff, long fileSize) {
super(snapshot, snapshotINode, posteriorDiff);
super(snapshotId, snapshotINode, posteriorDiff);
this.fileSize = fileSize;
}


@ -25,8 +25,8 @@ public class FileDiffList extends
AbstractINodeDiffList<INodeFile, INodeFileAttributes, FileDiff> {
@Override
FileDiff createDiff(Snapshot snapshot, INodeFile file) {
return new FileDiff(snapshot, file);
FileDiff createDiff(int snapshotId, INodeFile file) {
return new FileDiff(snapshotId, file);
}
@Override


@ -78,22 +78,22 @@ public String getDetailedString() {
return (isCurrentFileDeleted()? "(DELETED), ": ", ") + diffs;
}
public Quota.Counts cleanFile(final INodeFile file, final Snapshot snapshot,
Snapshot prior, final BlocksMapUpdateInfo collectedBlocks,
public Quota.Counts cleanFile(final INodeFile file, final int snapshotId,
int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
final List<INode> removedINodes, final boolean countDiffChange)
throws QuotaExceededException {
if (snapshot == null) {
if (snapshotId == Snapshot.CURRENT_STATE_ID) {
// delete the current file while the file has snapshot feature
if (!isCurrentFileDeleted()) {
file.recordModification(prior);
file.recordModification(priorSnapshotId);
deleteCurrentFile();
}
collectBlocksAndClear(file, collectedBlocks, removedINodes);
return Quota.Counts.newInstance();
} else { // delete the snapshot
prior = getDiffs().updatePrior(snapshot, prior);
return diffs.deleteSnapshotDiff(snapshot, prior, file, collectedBlocks,
removedINodes, countDiffChange);
priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
return diffs.deleteSnapshotDiff(snapshotId, priorSnapshotId, file,
collectedBlocks, removedINodes, countDiffChange);
}
}
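cleanFile takes the same sentinel-driven branch as cleanDirectory; paraphrased as comments (the constants are real, the wording is ours):

    // snapshotId == Snapshot.CURRENT_STATE_ID
    //   -> the live file is deleted while snapshots still reference it: record the
    //      modification under the prior snapshot, then mark the current file deleted.
    // any other snapshotId
    //   -> drop that one snapshot's FileDiff, after updatePrior() recomputes the
    //      true prior snapshot to merge into.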


@ -206,6 +206,15 @@ public Snapshot getSnapshot(byte[] snapshotName) {
return i < 0? null: snapshotsByNames.get(i);
}
Snapshot getSnapshotById(int sid) {
for (Snapshot s : snapshotsByNames) {
if (s.getId() == sid) {
return s;
}
}
return null;
}
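getSnapshotById is a linear scan because snapshotsByNames is kept sorted by name, not by id, so the binary search used for name lookups does not help here; the scan is bounded by the directory's snapshot quota. The tests below use it to map a diff's id back to its Snapshot, roughly as follows (hypothetical locals):

    // DirectoryDiff diff = dirNode.getDiffs().getLast();
    // Snapshot s = dirNode.getSnapshotById(diff.getSnapshotId());
    // String name = s == null ? "" : s.getRoot().getLocalName();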
/** @return {@link #snapshotsByNames} as a {@link ReadOnlyList} */
public ReadOnlyList<Snapshot> getSnapshotList() {
return ReadOnlyList.Util.asReadOnlyList(snapshotsByNames);
@ -297,13 +306,14 @@ Snapshot addSnapshot(int id, String name) throws SnapshotException,
+ "snapshot with the same name \"" + Snapshot.getSnapshotName(s) + "\".");
}
final DirectoryDiff d = getDiffs().addDiff(s, this);
d.snapshotINode = s.getRoot();
final DirectoryDiff d = getDiffs().addDiff(id, this);
d.setSnapshotRoot(s.getRoot());
snapshotsByNames.add(-i - 1, s);
//set modification time
updateModificationTime(Time.now(), null);
s.getRoot().setModificationTime(getModificationTime(), null);
updateModificationTime(Time.now(), Snapshot.CURRENT_STATE_ID);
s.getRoot().setModificationTime(getModificationTime(),
Snapshot.CURRENT_STATE_ID);
return s;
}
@ -326,10 +336,10 @@ Snapshot removeSnapshot(String snapshotName,
+ ": the snapshot does not exist.");
} else {
final Snapshot snapshot = snapshotsByNames.get(i);
Snapshot prior = Snapshot.findLatestSnapshot(this, snapshot);
int prior = Snapshot.findLatestSnapshot(this, snapshot.getId());
try {
Quota.Counts counts = cleanSubtree(snapshot, prior, collectedBlocks,
removedINodes, true);
Quota.Counts counts = cleanSubtree(snapshot.getId(), prior,
collectedBlocks, removedINodes, true);
INodeDirectory parent = getParent();
if (parent != null) {
// there will not be any WithName node corresponding to the deleted
@ -425,8 +435,9 @@ private void computeDiffRecursively(INode node, List<byte[]> parentPath,
diffReport.addDirDiff(dir, relativePath, diff);
}
}
ReadOnlyList<INode> children = dir.getChildrenList(diffReport
.isFromEarlier() ? diffReport.to : diffReport.from);
ReadOnlyList<INode> children = dir.getChildrenList(
diffReport.isFromEarlier() ? Snapshot.getSnapshotId(diffReport.to) :
Snapshot.getSnapshotId(diffReport.from));
for (INode child : children) {
final byte[] name = child.getLocalNameBytes();
if (diff.searchIndex(ListType.CREATED, name) < 0
@ -454,16 +465,15 @@ private void computeDiffRecursively(INode node, List<byte[]> parentPath,
* Replace itself with {@link INodeDirectoryWithSnapshot} or
* {@link INodeDirectory} depending on the latest snapshot.
*/
INodeDirectory replaceSelf(final Snapshot latest, final INodeMap inodeMap)
INodeDirectory replaceSelf(final int latestSnapshotId, final INodeMap inodeMap)
throws QuotaExceededException {
if (latest == null) {
Preconditions.checkState(
getDirectoryWithSnapshotFeature().getLastSnapshot() == null,
"latest == null but getLastSnapshot() != null, this=%s", this);
if (latestSnapshotId == Snapshot.CURRENT_STATE_ID) {
Preconditions.checkState(getDirectoryWithSnapshotFeature()
.getLastSnapshotId() == Snapshot.CURRENT_STATE_ID, "this=%s", this);
}
INodeDirectory dir = replaceSelf4INodeDirectory(inodeMap);
if (latest != null) {
dir.recordModification(latest);
if (latestSnapshotId != Snapshot.CURRENT_STATE_ID) {
dir.recordModification(latestSnapshotId);
}
return dir;
}
@ -475,10 +485,10 @@ public String toDetailString() {
@Override
public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
Snapshot snapshot) {
int snapshot) {
super.dumpTreeRecursively(out, prefix, snapshot);
if (snapshot == null) {
if (snapshot == Snapshot.CURRENT_STATE_ID) {
out.println();
out.print(prefix);
@ -494,7 +504,8 @@ public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
n++;
}
}
Preconditions.checkState(n == snapshotsByNames.size());
Preconditions.checkState(n == snapshotsByNames.size(), "#n=" + n
+ ", snapshotsByNames.size()=" + snapshotsByNames.size());
out.print(", #snapshot=");
out.println(n);
@ -522,8 +533,9 @@ public boolean hasNext() {
@Override
public SnapshotAndINode next() {
final Snapshot s = next.snapshot;
final SnapshotAndINode pair = new SnapshotAndINode(s);
final SnapshotAndINode pair = new SnapshotAndINode(next
.getSnapshotId(), getSnapshotById(next.getSnapshotId())
.getRoot());
next = findNext();
return pair;
}


@ -37,7 +37,11 @@
/** Snapshot of a sub-tree in the namesystem. */
@InterfaceAudience.Private
public class Snapshot implements Comparable<byte[]> {
public static final int INVALID_ID = -1;
/**
* This id is used to indicate the current state (vs. snapshots)
*/
public static final int CURRENT_STATE_ID = Integer.MAX_VALUE - 1;
public static final int NO_SNAPSHOT_ID = -1;
/**
* The pattern for generating the default snapshot name.
@ -61,14 +65,18 @@ public static String getSnapshotPath(String snapshottableDir,
.toString();
}
/**
* Get the name of the given snapshot.
* @param s The given snapshot.
* @return The name of the snapshot, or an empty string if {@code s} is null
*/
static String getSnapshotName(Snapshot s) {
return s != null ? s.getRoot().getLocalName() : "";
}
public static int getSnapshotId(Snapshot s) {
return s == null ? CURRENT_STATE_ID : s.getId();
}
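getSnapshotId is the bridge between the two conventions: code still holding a possibly-null Snapshot converts it to the id form in one step.

    // Snapshot.getSnapshotId(null) -> Snapshot.CURRENT_STATE_ID (the current state)
    // Snapshot.getSnapshotId(s)    -> s.getId()                 (a real snapshot)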
/**
* Compare snapshot with IDs, where null indicates the current status thus
@ -78,9 +86,8 @@ static String getSnapshotName(Snapshot s) {
= new Comparator<Snapshot>() {
@Override
public int compare(Snapshot left, Snapshot right) {
return ID_INTEGER_COMPARATOR.compare(
left == null? null: left.getId(),
right == null? null: right.getId());
return ID_INTEGER_COMPARATOR.compare(Snapshot.getSnapshotId(left),
Snapshot.getSnapshotId(right));
}
};
@ -92,12 +99,9 @@ public int compare(Snapshot left, Snapshot right) {
= new Comparator<Integer>() {
@Override
public int compare(Integer left, Integer right) {
// null means the current state, thus should be the largest
if (left == null) {
return right == null? 0: 1;
} else {
return right == null? -1: left - right;
}
// Snapshot.CURRENT_STATE_ID means the current state, thus should be the
// largest
return left - right;
}
};
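The simplified comparator works because CURRENT_STATE_ID is Integer.MAX_VALUE - 1, so the current state sorts after every real snapshot id with no null special case. A self-contained check of the ordering property; the subtraction cannot overflow here since real snapshot ids are small non-negative ints:

    public class IdOrdering {
      static final int CURRENT_STATE_ID = Integer.MAX_VALUE - 1; // Snapshot.CURRENT_STATE_ID

      public static void main(String[] args) {
        int s0 = 0, s7 = 7;
        System.out.println(s0 - s7 < 0);               // true: the older snapshot sorts first
        System.out.println(s7 - CURRENT_STATE_ID < 0); // true: every snapshot precedes current state
      }
    }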
@ -108,12 +112,12 @@ public int compare(Integer left, Integer right) {
* is not null).
*
* @param inode the given inode that the returned snapshot needs to cover
* @param anchor the returned snapshot should be taken before this snapshot.
* @return the latest snapshot that covers the given inode and was taken
* before the given snapshot (if it is not null).
* @param anchor the returned snapshot should be taken before this given id.
* @return id of the latest snapshot that covers the given inode and was taken
* before the given snapshot (if it is not null).
*/
public static Snapshot findLatestSnapshot(INode inode, Snapshot anchor) {
Snapshot latest = null;
public static int findLatestSnapshot(INode inode, final int anchor) {
int latest = NO_SNAPSHOT_ID;
for(; inode != null; inode = inode.getParent()) {
if (inode.isDirectory()) {
final INodeDirectory dir = inode.asDirectory();
@ -139,13 +143,13 @@ static public class Root extends INodeDirectory {
}
@Override
public ReadOnlyList<INode> getChildrenList(Snapshot snapshot) {
return getParent().getChildrenList(snapshot);
public ReadOnlyList<INode> getChildrenList(int snapshotId) {
return getParent().getChildrenList(snapshotId);
}
@Override
public INode getChild(byte[] name, Snapshot snapshot) {
return getParent().getChild(name, snapshot);
public INode getChild(byte[] name, int snapshotId) {
return getParent().getChild(name, snapshotId);
}
@Override


@ -118,7 +118,7 @@ public static FileDiffList loadFileDiffList(DataInput in,
private static FileDiff loadFileDiff(FileDiff posterior, DataInput in,
FSImageFormat.Loader loader) throws IOException {
// 1. Read the full path of the Snapshot root to identify the Snapshot
// 1. Read the id of the Snapshot root to identify the Snapshot
final Snapshot snapshot = loader.getSnapshot(in);
// 2. Load file size
@ -128,7 +128,7 @@ private static FileDiff loadFileDiff(FileDiff posterior, DataInput in,
final INodeFileAttributes snapshotINode = in.readBoolean()?
loader.loadINodeFileAttributes(in): null;
return new FileDiff(snapshot, snapshotINode, posterior, fileSize);
return new FileDiff(snapshot.getId(), snapshotINode, posterior, fileSize);
}
/**
@ -149,7 +149,8 @@ private static INode loadCreated(byte[] createdNodeName,
} // else go to the next SnapshotDiff
}
// use the current child
INode currentChild = parent.getChild(createdNodeName, null);
INode currentChild = parent.getChild(createdNodeName,
Snapshot.CURRENT_STATE_ID);
if (currentChild == null) {
throw new IOException("Cannot find an INode associated with the INode "
+ DFSUtil.bytes2String(createdNodeName)
@ -295,9 +296,9 @@ private static DirectoryDiff loadDirectoryDiff(INodeDirectory parent,
// 6. Compose the SnapshotDiff
List<DirectoryDiff> diffs = parent.getDiffs().asList();
DirectoryDiff sdiff = new DirectoryDiff(snapshot, snapshotINode,
diffs.isEmpty() ? null : diffs.get(0),
childrenSize, createdList, deletedList);
DirectoryDiff sdiff = new DirectoryDiff(snapshot.getId(), snapshotINode,
diffs.isEmpty() ? null : diffs.get(0), childrenSize, createdList,
deletedList, snapshotINode == snapshot.getRoot());
return sdiff;
}


@ -114,7 +114,7 @@ public void setSnapshottable(final String path, boolean checkNestedSnapshottable
s = (INodeDirectorySnapshottable)d;
s.setSnapshotQuota(INodeDirectorySnapshottable.SNAPSHOT_LIMIT);
} else {
s = d.replaceSelf4INodeDirectorySnapshottable(iip.getLatestSnapshot(),
s = d.replaceSelf4INodeDirectorySnapshottable(iip.getLatestSnapshotId(),
fsdir.getINodeMap());
}
addSnapshottable(s);
@ -160,7 +160,7 @@ public void resetSnapshottable(final String path) throws IOException {
if (s == fsdir.getRoot()) {
s.setSnapshotQuota(0);
} else {
s.replaceSelf(iip.getLatestSnapshot(), fsdir.getINodeMap());
s.replaceSelf(iip.getLatestSnapshotId(), fsdir.getINodeMap());
}
removeSnapshottable(s);
}
@ -324,7 +324,8 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing(
SnapshottableDirectoryStatus status = new SnapshottableDirectoryStatus(
dir.getModificationTime(), dir.getAccessTime(),
dir.getFsPermission(), dir.getUserName(), dir.getGroupName(),
dir.getLocalNameBytes(), dir.getId(), dir.getChildrenNum(null),
dir.getLocalNameBytes(), dir.getId(),
dir.getChildrenNum(Snapshot.CURRENT_STATE_ID),
dir.getNumSnapshots(),
dir.getSnapshotQuota(), dir.getParent() == null ?
DFSUtil.EMPTY_BYTES :


@ -29,6 +29,7 @@
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@ -119,7 +120,7 @@ public void testReset() throws Exception {
fsdir.reset();
Assert.assertFalse(fsdir.isReady());
final INodeDirectory root = (INodeDirectory) fsdir.getINode("/");
Assert.assertTrue(root.getChildrenList(null).isEmpty());
Assert.assertTrue(root.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
fsdir.imageLoadComplete();
Assert.assertTrue(fsdir.isReady());
}


@ -34,6 +34,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
@ -43,6 +44,7 @@
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
import org.apache.hadoop.hdfs.util.Canceler;
import org.apache.log4j.Level;
@ -195,11 +197,12 @@ public void testSnapshotOnRoot() throws Exception {
INodeDirectorySnapshottable rootNode =
(INodeDirectorySnapshottable) fsn.dir.getINode4Write(root.toString());
assertTrue("The children list of root should be empty",
rootNode.getChildrenList(null).isEmpty());
rootNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
// one snapshot on root: s1
List<DirectoryDiff> diffList = rootNode.getDiffs().asList();
assertEquals(1, diffList.size());
assertEquals("s1", diffList.get(0).getSnapshot().getRoot().getLocalName());
Snapshot s1 = rootNode.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
// check SnapshotManager's snapshottable directory list
assertEquals(1, fsn.getSnapshotManager().getNumSnapshottableDirs());


@ -60,6 +60,7 @@
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Time;
@ -781,7 +782,7 @@ private INode createTreeOfInodes(String path) throws QuotaExceededException {
}
System.out.println("Adding component " + DFSUtil.bytes2String(component));
dir = new INodeDirectory(++id, component, permstatus, 0);
prev.addChild(dir, false, null);
prev.addChild(dir, false, Snapshot.CURRENT_STATE_ID);
prev = dir;
}
return dir; // Last Inode in the chain


@ -123,8 +123,12 @@ static void assertSnapshot(INodesInPath inodesInPath, boolean isSnapshot,
final Snapshot snapshot, int index) {
assertEquals(isSnapshot, inodesInPath.isSnapshot());
assertEquals(index, inodesInPath.getSnapshotRootIndex());
assertEquals(isSnapshot? snapshot: null, inodesInPath.getPathSnapshot());
assertEquals(isSnapshot? null: snapshot, inodesInPath.getLatestSnapshot());
assertEquals(Snapshot.getSnapshotId(isSnapshot ? snapshot : null),
inodesInPath.getPathSnapshotId());
if (!isSnapshot) {
assertEquals(Snapshot.getSnapshotId(snapshot),
inodesInPath.getLatestSnapshotId());
}
if (isSnapshot && index >= 0) {
assertEquals(Snapshot.Root.class, inodesInPath.getINodes()[index].getClass());
}
@ -424,7 +428,7 @@ public void testSnapshotPathINodesAfterModification() throws Exception {
// The modification time of the snapshot INode should be the same with the
// original INode before modification
assertEquals(modTime,
snapshotFileNode.getModificationTime(ssNodesInPath.getPathSnapshot()));
snapshotFileNode.getModificationTime(ssNodesInPath.getPathSnapshotId()));
// Check the INode for /TestSnapshot/sub1/file1 again
names = INode.getPathNames(file1.toString());


@ -270,7 +270,8 @@ static void printFile(File f) throws IOException {
public static void dumpTree2File(FSDirectory fsdir, File f) throws IOException{
final PrintWriter out = new PrintWriter(new FileWriter(f, false), true);
fsdir.getINode("/").dumpTreeRecursively(out, new StringBuilder(), null);
fsdir.getINode("/").dumpTreeRecursively(out, new StringBuilder(),
Snapshot.CURRENT_STATE_ID);
out.close();
}


@ -156,7 +156,6 @@ public void testSnapshotWhileAppending() throws Exception {
INodeDirectorySnapshottable dirNode = (INodeDirectorySnapshottable) fsdir
.getINode(dir.toString());
DirectoryDiff last = dirNode.getDiffs().getLast();
Snapshot s0 = last.snapshot;
// 2. append without closing stream
out = appendFileWithoutClosing(file, BLOCKSIZE);
@ -164,7 +163,7 @@ public void testSnapshotWhileAppending() throws Exception {
// re-check nodeInDeleted_S0
dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize(s0));
assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize(last.getSnapshotId()));
// 3. take snapshot --> close stream
hdfs.createSnapshot(dir, "s1");
@ -175,9 +174,8 @@ public void testSnapshotWhileAppending() throws Exception {
fileNode = (INodeFile) fsdir.getINode(file.toString());
dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
last = dirNode.getDiffs().getLast();
Snapshot s1 = last.snapshot;
assertTrue(fileNode.isWithSnapshot());
assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(s1));
assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(last.getSnapshotId()));
// 4. modify file --> append without closing stream --> take snapshot -->
// close stream
@ -187,7 +185,7 @@ public void testSnapshotWhileAppending() throws Exception {
out.close();
// re-check the size of nodeInDeleted_S1
assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(s1));
assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(last.getSnapshotId()));
}
/**


@ -44,6 +44,7 @@
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
@ -72,6 +73,7 @@
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
/** Testing rename with snapshots. */
public class TestRenameWithSnapshots {
@ -402,9 +404,11 @@ public void testRenameFileAcrossSnapshottableDirs() throws Exception {
final Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3",
"foo");
assertFalse(hdfs.exists(foo_s3));
INodeDirectorySnapshottable sdir2Node =
(INodeDirectorySnapshottable) fsdir.getINode(sdir2.toString());
Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
INodeFile sfoo = fsdir.getINode(newfoo.toString()).asFile();
assertEquals("s2", sfoo.getDiffs().getLastSnapshot().getRoot()
.getLocalName());
assertEquals(s2.getId(), sfoo.getDiffs().getLastSnapshotId());
}
/**
@ -604,8 +608,10 @@ public void testRenameFileAndDeleteSnapshot() throws Exception {
INodeFile snode = fsdir.getINode(newfoo.toString()).asFile();
assertEquals(1, snode.getDiffs().asList().size());
assertEquals("s2", snode.getDiffs().getLastSnapshot().getRoot()
.getLocalName());
INodeDirectorySnapshottable sdir2Node =
(INodeDirectorySnapshottable) fsdir.getINode(sdir2.toString());
Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
assertEquals(s2.getId(), snode.getDiffs().getLastSnapshotId());
// restart cluster
restartClusterAndCheckImage(true);
@ -758,12 +764,14 @@ public void testRenameMoreThanOnceAcrossSnapDirs() throws Exception {
assertEquals(2, fooWithCount.getReferenceCount());
INodeDirectory foo = fooWithCount.asDirectory();
assertEquals(1, foo.getDiffs().asList().size());
assertEquals("s1", foo.getDirectoryWithSnapshotFeature().getLastSnapshot()
.getRoot().getLocalName());
INodeDirectorySnapshottable sdir1Node =
(INodeDirectorySnapshottable) fsdir.getINode(sdir1.toString());
Snapshot s1 = sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(), foo.getDirectoryWithSnapshotFeature()
.getLastSnapshotId());
INodeFile bar1 = fsdir.getINode4Write(bar1_dir1.toString()).asFile();
assertEquals(1, bar1.getDiffs().asList().size());
assertEquals("s1", bar1.getDiffs().getLastSnapshot().getRoot()
.getLocalName());
assertEquals(s1.getId(), bar1.getDiffs().getLastSnapshotId());
INodeReference barRef = fsdir.getINode4Write(bar2_dir1.toString())
.asReference();
@ -772,8 +780,7 @@ public void testRenameMoreThanOnceAcrossSnapDirs() throws Exception {
assertEquals(2, barWithCount.getReferenceCount());
INodeFile bar = barWithCount.asFile();
assertEquals(1, bar.getDiffs().asList().size());
assertEquals("s1", bar.getDiffs().getLastSnapshot().getRoot()
.getLocalName());
assertEquals(s1.getId(), bar.getDiffs().getLastSnapshotId());
// restart the cluster and check fsimage
restartClusterAndCheckImage(true);
@ -967,6 +974,13 @@ public void testRenameMoreThanOnceAcrossSnapDirs_2() throws Exception {
hdfs.rename(bar_dir2, bar_dir1);
// check the internal details
INodeDirectorySnapshottable sdir1Node =
(INodeDirectorySnapshottable) fsdir.getINode(sdir1.toString());
INodeDirectorySnapshottable sdir2Node =
(INodeDirectorySnapshottable) fsdir.getINode(sdir2.toString());
INodeDirectorySnapshottable sdir3Node =
(INodeDirectorySnapshottable) fsdir.getINode(sdir3.toString());
INodeReference fooRef = fsdir.getINode4Write(foo_dir1.toString())
.asReference();
INodeReference.WithCount fooWithCount = (WithCount) fooRef.getReferredINode();
@ -975,16 +989,22 @@ public void testRenameMoreThanOnceAcrossSnapDirs_2() throws Exception {
INodeDirectory foo = fooWithCount.asDirectory();
List<DirectoryDiff> fooDiffs = foo.getDiffs().asList();
assertEquals(4, fooDiffs.size());
assertEquals("s2222", fooDiffs.get(3).snapshot.getRoot().getLocalName());
assertEquals("s333", fooDiffs.get(2).snapshot.getRoot().getLocalName());
assertEquals("s22", fooDiffs.get(1).snapshot.getRoot().getLocalName());
assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName());
Snapshot s2222 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2222"));
Snapshot s333 = sdir3Node.getSnapshot(DFSUtil.string2Bytes("s333"));
Snapshot s22 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s22"));
Snapshot s1 = sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s2222.getId(), fooDiffs.get(3).getSnapshotId());
assertEquals(s333.getId(), fooDiffs.get(2).getSnapshotId());
assertEquals(s22.getId(), fooDiffs.get(1).getSnapshotId());
assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
INodeFile bar1 = fsdir.getINode4Write(bar1_dir1.toString()).asFile();
List<FileDiff> bar1Diffs = bar1.getDiffs().asList();
assertEquals(3, bar1Diffs.size());
assertEquals("s333", bar1Diffs.get(2).snapshot.getRoot().getLocalName());
assertEquals("s22", bar1Diffs.get(1).snapshot.getRoot().getLocalName());
assertEquals("s1", bar1Diffs.get(0).snapshot.getRoot().getLocalName());
assertEquals(s333.getId(), bar1Diffs.get(2).getSnapshotId());
assertEquals(s22.getId(), bar1Diffs.get(1).getSnapshotId());
assertEquals(s1.getId(), bar1Diffs.get(0).getSnapshotId());
INodeReference barRef = fsdir.getINode4Write(bar_dir1.toString())
.asReference();
@ -994,10 +1014,10 @@ public void testRenameMoreThanOnceAcrossSnapDirs_2() throws Exception {
INodeFile bar = barWithCount.asFile();
List<FileDiff> barDiffs = bar.getDiffs().asList();
assertEquals(4, barDiffs.size());
assertEquals("s2222", barDiffs.get(3).snapshot.getRoot().getLocalName());
assertEquals("s333", barDiffs.get(2).snapshot.getRoot().getLocalName());
assertEquals("s22", barDiffs.get(1).snapshot.getRoot().getLocalName());
assertEquals("s1", barDiffs.get(0).snapshot.getRoot().getLocalName());
assertEquals(s2222.getId(), barDiffs.get(3).getSnapshotId());
assertEquals(s333.getId(), barDiffs.get(2).getSnapshotId());
assertEquals(s22.getId(), barDiffs.get(1).getSnapshotId());
assertEquals(s1.getId(), barDiffs.get(0).getSnapshotId());
// restart the cluster and check fsimage
restartClusterAndCheckImage(true);
@ -1033,10 +1053,10 @@ public void testRenameMoreThanOnceAcrossSnapDirs_2() throws Exception {
foo = fooWithCount.asDirectory();
fooDiffs = foo.getDiffs().asList();
assertEquals(4, fooDiffs.size());
assertEquals("s2222", fooDiffs.get(3).snapshot.getRoot().getLocalName());
assertEquals(s2222.getId(), fooDiffs.get(3).getSnapshotId());
bar1Diffs = bar1.getDiffs().asList();
assertEquals(3, bar1Diffs.size());
assertEquals("s333", bar1Diffs.get(2).snapshot.getRoot().getLocalName());
assertEquals(s333.getId(), bar1Diffs.get(2).getSnapshotId());
barRef = fsdir.getINode(bar_s2222.toString()).asReference();
barWithCount = (WithCount) barRef.getReferredINode();
@ -1044,7 +1064,7 @@ public void testRenameMoreThanOnceAcrossSnapDirs_2() throws Exception {
bar = barWithCount.asFile();
barDiffs = bar.getDiffs().asList();
assertEquals(4, barDiffs.size());
assertEquals("s2222", barDiffs.get(3).snapshot.getRoot().getLocalName());
assertEquals(s2222.getId(), barDiffs.get(3).getSnapshotId());
}
/**
@ -1164,6 +1184,9 @@ public void testRenameDirAndDeleteSnapshot_2() throws Exception {
assertTrue(hdfs.exists(bar_s2));
// check internal details
INodeDirectorySnapshottable sdir2Node =
(INodeDirectorySnapshottable) fsdir.getINode(sdir2.toString());
Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo");
INodeReference fooRef = fsdir.getINode(foo_s2.toString()).asReference();
assertTrue(fooRef instanceof INodeReference.WithName);
@ -1172,7 +1195,7 @@ public void testRenameDirAndDeleteSnapshot_2() throws Exception {
INodeDirectory fooDir = fooWC.getReferredINode().asDirectory();
List<DirectoryDiff> diffs = fooDir.getDiffs().asList();
assertEquals(1, diffs.size());
assertEquals("s2", diffs.get(0).snapshot.getRoot().getLocalName());
assertEquals(s2.getId(), diffs.get(0).getSnapshotId());
// restart the cluster and check fsimage
restartClusterAndCheckImage(true);
@ -1260,7 +1283,7 @@ public void testRenameUndo_1() throws Exception {
INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
INodeDirectory mockDir2 = spy(dir2);
doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
(Snapshot) anyObject());
Mockito.anyInt());
INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());
@ -1271,12 +1294,14 @@ public void testRenameUndo_1() throws Exception {
// check the current internal details
INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir
.getINode4Write(sdir1.toString());
ReadOnlyList<INode> dir1Children = dir1Node.getChildrenList(null);
Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
ReadOnlyList<INode> dir1Children = dir1Node
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, dir1Children.size());
assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
assertEquals(1, dir1Diffs.size());
assertEquals("s1", dir1Diffs.get(0).snapshot.getRoot().getLocalName());
assertEquals(s1.getId(), dir1Diffs.get(0).getSnapshotId());
// after the undo of rename, both the created and deleted list of sdir1
// should be empty
@ -1288,7 +1313,7 @@ public void testRenameUndo_1() throws Exception {
assertTrue(fooNode.isDirectory() && fooNode.asDirectory().isWithSnapshot());
List<DirectoryDiff> fooDiffs = fooNode.asDirectory().getDiffs().asList();
assertEquals(1, fooDiffs.size());
assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName());
assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
INode fooNode_s1 = fsdir.getINode(foo_s1.toString());
@ -1299,7 +1324,8 @@ public void testRenameUndo_1() throws Exception {
INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
.asDirectory();
assertFalse(dir2Node.isWithSnapshot());
ReadOnlyList<INode> dir2Children = dir2Node.getChildrenList(null);
ReadOnlyList<INode> dir2Children = dir2Node
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, dir2Children.size());
assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
}
@ -1327,7 +1353,7 @@ public void testRenameUndo_2() throws Exception {
INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
INodeDirectory mockDir2 = spy(dir2);
doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
(Snapshot) anyObject());
Mockito.anyInt());
INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());
@ -1338,12 +1364,14 @@ public void testRenameUndo_2() throws Exception {
// check the current internal details
INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir
.getINode4Write(sdir1.toString());
ReadOnlyList<INode> dir1Children = dir1Node.getChildrenList(null);
Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
ReadOnlyList<INode> dir1Children = dir1Node
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, dir1Children.size());
assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
assertEquals(1, dir1Diffs.size());
assertEquals("s1", dir1Diffs.get(0).snapshot.getRoot().getLocalName());
assertEquals(s1.getId(), dir1Diffs.get(0).getSnapshotId());
// after the undo of rename, the created list of sdir1 should contain
// 1 element
@ -1363,7 +1391,8 @@ public void testRenameUndo_2() throws Exception {
INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
.asDirectory();
assertFalse(dir2Node.isWithSnapshot());
ReadOnlyList<INode> dir2Children = dir2Node.getChildrenList(null);
ReadOnlyList<INode> dir2Children = dir2Node
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, dir2Children.size());
assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
}
@ -1389,7 +1418,7 @@ public void testRenameUndo_3() throws Exception {
INodeDirectory dir3 = fsdir.getINode4Write(sdir3.toString()).asDirectory();
INodeDirectory mockDir3 = spy(dir3);
doReturn(false).when(mockDir3).addChild((INode) anyObject(), anyBoolean(),
(Snapshot) anyObject());
Mockito.anyInt());
INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());
@ -1400,13 +1429,18 @@ public void testRenameUndo_3() throws Exception {
assertFalse(result);
// check the current internal details
INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir
.getINode4Write(sdir1.toString());
Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
INodeDirectorySnapshottable dir2Node = (INodeDirectorySnapshottable) fsdir
.getINode4Write(sdir2.toString());
ReadOnlyList<INode> dir2Children = dir2Node.getChildrenList(null);
Snapshot s2 = dir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
ReadOnlyList<INode> dir2Children = dir2Node
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, dir2Children.size());
List<DirectoryDiff> dir2Diffs = dir2Node.getDiffs().asList();
assertEquals(1, dir2Diffs.size());
assertEquals("s2", Snapshot.getSnapshotName(dir2Diffs.get(0).snapshot));
assertEquals(s2.getId(), dir2Diffs.get(0).getSnapshotId());
ChildrenDiff childrenDiff = dir2Diffs.get(0).getChildrenDiff();
assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
assertEquals(1, childrenDiff.getList(ListType.CREATED).size());
@ -1418,7 +1452,7 @@ public void testRenameUndo_3() throws Exception {
assertTrue(fooNode instanceof INodeReference.DstReference);
List<DirectoryDiff> fooDiffs = fooNode.asDirectory().getDiffs().asList();
assertEquals(1, fooDiffs.size());
assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName());
assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
// create snapshot on sdir2 and rename again
hdfs.createSnapshot(sdir2, "s3");
@ -1428,13 +1462,14 @@ public void testRenameUndo_3() throws Exception {
// check internal details again
dir2Node = (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir2
.toString());
Snapshot s3 = dir2Node.getSnapshot(DFSUtil.string2Bytes("s3"));
fooNode = fsdir.getINode4Write(foo_dir2.toString());
dir2Children = dir2Node.getChildrenList(null);
dir2Children = dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, dir2Children.size());
dir2Diffs = dir2Node.getDiffs().asList();
assertEquals(2, dir2Diffs.size());
assertEquals("s2", Snapshot.getSnapshotName(dir2Diffs.get(0).snapshot));
assertEquals("s3", Snapshot.getSnapshotName(dir2Diffs.get(1).snapshot));
assertEquals(s2.getId(), dir2Diffs.get(0).getSnapshotId());
assertEquals(s3.getId(), dir2Diffs.get(1).getSnapshotId());
childrenDiff = dir2Diffs.get(0).getChildrenDiff();
assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
@ -1452,8 +1487,8 @@ public void testRenameUndo_3() throws Exception {
assertTrue(fooNode instanceof INodeReference.DstReference);
fooDiffs = fooNode.asDirectory().getDiffs().asList();
assertEquals(2, fooDiffs.size());
assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName());
assertEquals("s3", fooDiffs.get(1).snapshot.getRoot().getLocalName());
assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
assertEquals(s3.getId(), fooDiffs.get(1).getSnapshotId());
}
/**
@ -1489,9 +1524,9 @@ public void testRenameUndo_4() throws Exception {
INodeDirectory mockDir3 = spy(dir3);
// fail the rename but succeed in undo
doReturn(false).when(mockDir3).addChild((INode) Mockito.isNull(),
anyBoolean(), (Snapshot) anyObject());
anyBoolean(), Mockito.anyInt());
Mockito.when(mockDir3.addChild((INode) Mockito.isNotNull(), anyBoolean(),
(Snapshot) anyObject())).thenReturn(false).thenCallRealMethod();
Mockito.anyInt())).thenReturn(false).thenCallRealMethod();
INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());
foo3Node.setParent(mockDir3);
@ -1551,7 +1586,7 @@ public void testRenameUndo_5() throws Exception {
INodeDirectory dir1Node = fsdir.getINode4Write(dir1.toString())
.asDirectory();
List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
.getChildrenList(null));
.getChildrenList(Snapshot.CURRENT_STATE_ID));
assertEquals(1, childrenList.size());
INode fooNode = childrenList.get(0);
assertTrue(fooNode.asDirectory().isWithSnapshot());
@ -1572,7 +1607,7 @@ public void testRenameUndo_5() throws Exception {
assertEquals(3, counts.get(Quota.NAMESPACE));
assertEquals(0, counts.get(Quota.DISKSPACE));
childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory()
.getChildrenList(null));
.getChildrenList(Snapshot.CURRENT_STATE_ID));
assertEquals(1, childrenList.size());
INode subdir2Node = childrenList.get(0);
assertSame(dir2Node, subdir2Node.getParent());
@ -1627,7 +1662,7 @@ public void testRenameUndo_6() throws Exception {
INodeDirectory dir1Node = fsdir.getINode4Write(dir1.toString())
.asDirectory();
List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
.getChildrenList(null));
.getChildrenList(Snapshot.CURRENT_STATE_ID));
assertEquals(1, childrenList.size());
INode fooNode = childrenList.get(0);
assertTrue(fooNode.asDirectory().isWithSnapshot());
@ -1646,7 +1681,7 @@ public void testRenameUndo_6() throws Exception {
assertEquals(4, counts.get(Quota.NAMESPACE));
assertEquals(0, counts.get(Quota.DISKSPACE));
childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory()
.getChildrenList(null));
.getChildrenList(Snapshot.CURRENT_STATE_ID));
assertEquals(1, childrenList.size());
INode subdir2Node = childrenList.get(0);
assertTrue(subdir2Node.asDirectory().isWithSnapshot());
@ -1690,14 +1725,18 @@ public void testRenameUndo_7() throws Exception {
}
// check
INodeDirectorySnapshottable rootNode = (INodeDirectorySnapshottable) fsdir
.getINode4Write(root.toString());
INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
ReadOnlyList<INode> children = fooNode.getChildrenList(null);
ReadOnlyList<INode> children = fooNode
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, children.size());
List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
assertEquals(1, diffList.size());
DirectoryDiff diff = diffList.get(0);
// this diff is generated while renaming
assertEquals(snap1, Snapshot.getSnapshotName(diff.snapshot));
Snapshot s1 = rootNode.getSnapshot(DFSUtil.string2Bytes(snap1));
assertEquals(s1.getId(), diff.getSnapshotId());
// after undo, the diff should be empty
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
@ -1709,7 +1748,7 @@ public void testRenameUndo_7() throws Exception {
List<FileDiff> barDiffList = barNode.getDiffs().asList();
assertEquals(1, barDiffList.size());
FileDiff barDiff = barDiffList.get(0);
assertEquals(snap1, Snapshot.getSnapshotName(barDiff.snapshot));
assertEquals(s1.getId(), barDiff.getSnapshotId());
// restart cluster multiple times to make sure the fsimage and edits log are
// correct. Note that when loading fsimage, foo and bar will be converted
@ -1941,12 +1980,14 @@ public void testRenameDirAndDeleteSnapshot_3() throws Exception {
(WithCount) fooRef.asReference().getReferredINode();
assertEquals(1, wc.getReferenceCount());
INodeDirectory fooNode = wc.getReferredINode().asDirectory();
ReadOnlyList<INode> children = fooNode.getChildrenList(null);
ReadOnlyList<INode> children = fooNode
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, children.size());
assertEquals(bar.getName(), children.get(0).getLocalName());
List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
assertEquals(1, diffList.size());
assertEquals("s1", Snapshot.getSnapshotName(diffList.get(0).snapshot));
Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
ChildrenDiff diff = diffList.get(0).getChildrenDiff();
assertEquals(0, diff.getList(ListType.CREATED).size());
assertEquals(0, diff.getList(ListType.DELETED).size());
@ -2009,14 +2050,16 @@ public void testRenameDirAndDeleteSnapshot_4() throws Exception {
(WithCount) fooRef.asReference().getReferredINode();
assertEquals(2, wc.getReferenceCount());
INodeDirectory fooNode = wc.getReferredINode().asDirectory();
ReadOnlyList<INode> children = fooNode.getChildrenList(null);
ReadOnlyList<INode> children = fooNode
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(3, children.size());
assertEquals(bar.getName(), children.get(0).getLocalName());
assertEquals(bar2.getName(), children.get(1).getLocalName());
assertEquals(bar3.getName(), children.get(2).getLocalName());
List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
assertEquals(1, diffList.size());
assertEquals("s1", Snapshot.getSnapshotName(diffList.get(0).snapshot));
Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
ChildrenDiff diff = diffList.get(0).getChildrenDiff();
// bar2 and bar3 in the created list
assertEquals(2, diff.getList(ListType.CREATED).size());
@ -2134,11 +2177,12 @@ public void testRenameDirAndDeleteSnapshot_6() throws Exception {
// recordModification before the rename
assertTrue(fooNode.isWithSnapshot());
assertTrue(fooNode.getDiffs().asList().isEmpty());
INodeDirectory barNode = fooNode.getChildrenList(null).get(0).asDirectory();
INodeDirectory barNode = fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID)
.get(0).asDirectory();
// bar should also be INodeDirectory (With Snapshot), and both of its diff
// list and children list are empty
assertTrue(barNode.getDiffs().asList().isEmpty());
assertTrue(barNode.getChildrenList(null).isEmpty());
assertTrue(barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
restartClusterAndCheckImage(true);
}
@ -2210,7 +2254,10 @@ public void testRenameDirAndDeleteSnapshot_7() throws Exception {
List<DirectoryDiff> barDiffList = barNode.getDiffs().asList();
assertEquals(1, barDiffList.size());
DirectoryDiff diff = barDiffList.get(0);
assertEquals("s0", Snapshot.getSnapshotName(diff.snapshot));
INodeDirectorySnapshottable testNode =
(INodeDirectorySnapshottable) fsdir.getINode4Write(test.toString());
Snapshot s0 = testNode.getSnapshot(DFSUtil.string2Bytes("s0"));
assertEquals(s0.getId(), diff.getSnapshotId());
// and file should be stored in the deleted list of this snapshot diff
assertEquals("file", diff.getChildrenDiff().getList(ListType.DELETED)
.get(0).getLocalName());
@ -2276,7 +2323,7 @@ public void testCleanDstReference() throws Exception {
final Path barInS0 = SnapshotTestHelper.getSnapshotPath(test, "s0",
"foo/bar");
INodeDirectory barNode = fsdir.getINode(barInS0.toString()).asDirectory();
assertEquals(0, barNode.getChildrenList(null).size());
assertEquals(0, barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).size());
List<DirectoryDiff> diffList = barNode.getDiffs().asList();
assertEquals(1, diffList.size());
DirectoryDiff diff = diffList.get(0);
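With diffs no longer holding Snapshot objects, the tests in this file switch from comparing snapshot root names to resolving the Snapshot by name and comparing ids. The recurring idiom, sketched with hypothetical locals:

    // INodeDirectorySnapshottable dirNode =
    //     (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
    // Snapshot s1 = dirNode.getSnapshot(DFSUtil.string2Bytes("s1"));
    // assertEquals(s1.getId(), diffList.get(0).getSnapshotId()); // was: root-name equality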


@ -28,6 +28,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@ -153,7 +154,9 @@ public void testClearQuota() throws Exception {
assertTrue(subNode.asDirectory().isWithSnapshot());
List<DirectoryDiff> diffList = subNode.asDirectory().getDiffs().asList();
assertEquals(1, diffList.size());
assertEquals("s2", Snapshot.getSnapshotName(diffList.get(0).snapshot));
Snapshot s2 = ((INodeDirectorySnapshottable) dirNode).getSnapshot(DFSUtil
.string2Bytes("s2"));
assertEquals(s2.getId(), diffList.get(0).getSnapshotId());
List<INode> createdList = diffList.get(0).getChildrenDiff().getList(ListType.CREATED);
assertEquals(1, createdList.size());
assertSame(fsdir.getINode4Write(file.toString()), createdList.get(0));


@ -268,7 +268,8 @@ public void testDeleteCurrentFileDirectory() throws Exception {
(INodeDirectory) fsdir.getINode(snapshotNoChangeDir.toString());
// should still be an INodeDirectory
assertEquals(INodeDirectory.class, snapshotNode.getClass());
ReadOnlyList<INode> children = snapshotNode.getChildrenList(null);
ReadOnlyList<INode> children = snapshotNode
.getChildrenList(Snapshot.CURRENT_STATE_ID);
// check 2 children: noChangeFile and metaChangeFile2
assertEquals(2, children.size());
INode noChangeFileSCopy = children.get(1);
@ -286,11 +287,11 @@ public void testDeleteCurrentFileDirectory() throws Exception {
// check the replication factor of metaChangeFile2SCopy
assertEquals(REPLICATION_1,
metaChangeFile2SCopy.getFileReplication(null));
metaChangeFile2SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID));
assertEquals(REPLICATION_1,
metaChangeFile2SCopy.getFileReplication(snapshot1));
metaChangeFile2SCopy.getFileReplication(snapshot1.getId()));
assertEquals(REPLICATION,
metaChangeFile2SCopy.getFileReplication(snapshot0));
metaChangeFile2SCopy.getFileReplication(snapshot0.getId()));
// Case 4: delete directory sub
// before deleting sub, we first create a new file under sub
@ -316,23 +317,25 @@ public void testDeleteCurrentFileDirectory() throws Exception {
assertTrue(snapshotNode4Sub.isWithSnapshot());
// the snapshot copy of sub has only one child subsub.
// newFile should have been destroyed
assertEquals(1, snapshotNode4Sub.getChildrenList(null).size());
assertEquals(1, snapshotNode4Sub.getChildrenList(Snapshot.CURRENT_STATE_ID)
.size());
// but should have two children, subsub and noChangeDir, when s1 was taken
assertEquals(2, snapshotNode4Sub.getChildrenList(snapshot1).size());
assertEquals(2, snapshotNode4Sub.getChildrenList(snapshot1.getId()).size());
// check the snapshot copy of subsub, which is contained in the subtree of
// sub's snapshot copy
INode snapshotNode4Subsub = snapshotNode4Sub.getChildrenList(null).get(0);
INode snapshotNode4Subsub = snapshotNode4Sub.getChildrenList(
Snapshot.CURRENT_STATE_ID).get(0);
assertTrue(snapshotNode4Subsub.asDirectory().isWithSnapshot());
assertTrue(snapshotNode4Sub == snapshotNode4Subsub.getParent());
// check the children of subsub
INodeDirectory snapshotSubsubDir = (INodeDirectory) snapshotNode4Subsub;
children = snapshotSubsubDir.getChildrenList(null);
children = snapshotSubsubDir.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(2, children.size());
assertEquals(children.get(0).getLocalName(), metaChangeFile1.getName());
assertEquals(children.get(1).getLocalName(), newFileAfterS0.getName());
// only one child before snapshot s0
children = snapshotSubsubDir.getChildrenList(snapshot0);
children = snapshotSubsubDir.getChildrenList(snapshot0.getId());
assertEquals(1, children.size());
INode child = children.get(0);
assertEquals(child.getLocalName(), metaChangeFile1.getName());
@ -341,11 +344,11 @@ public void testDeleteCurrentFileDirectory() throws Exception {
assertTrue(metaChangeFile1SCopy.isWithSnapshot());
assertFalse(metaChangeFile1SCopy.isUnderConstruction());
assertEquals(REPLICATION_1,
metaChangeFile1SCopy.getFileReplication(null));
metaChangeFile1SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID));
assertEquals(REPLICATION_1,
metaChangeFile1SCopy.getFileReplication(snapshot1));
metaChangeFile1SCopy.getFileReplication(snapshot1.getId()));
assertEquals(REPLICATION,
metaChangeFile1SCopy.getFileReplication(snapshot0));
metaChangeFile1SCopy.getFileReplication(snapshot0.getId()));
}
/**
@ -474,9 +477,10 @@ public void testDeleteEarliestSnapshot2() throws Exception {
(INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
Snapshot snapshot0 = dirNode.getSnapshot(DFSUtil.string2Bytes("s0"));
assertNull(snapshot0);
Snapshot snapshot1 = dirNode.getSnapshot(DFSUtil.string2Bytes("s1"));
DirectoryDiffList diffList = dirNode.getDiffs();
assertEquals(1, diffList.asList().size());
assertEquals("s1", diffList.getLast().snapshot.getRoot().getLocalName());
assertEquals(snapshot1.getId(), diffList.getLast().getSnapshotId());
diffList = fsdir.getINode(metaChangeDir.toString()).asDirectory()
.getDiffs();
assertEquals(0, diffList.asList().size());

View File

@ -101,7 +101,8 @@ private void checkSnapshotList(INodeDirectorySnapshottable srcRoot,
List<DirectoryDiff> listByTime = srcRoot.getDiffs().asList();
assertEquals(names.length, listByTime.size());
for (int i = 0; i < listByTime.size(); i++) {
assertEquals(names[i], listByTime.get(i).getSnapshot().getRoot().getLocalName());
Snapshot s = srcRoot.getSnapshotById(listByTime.get(i).getSnapshotId());
assertEquals(names[i], s.getRoot().getLocalName());
}
}

View File

@ -150,7 +150,7 @@ private void checkSnapshotFileReplication(Path currentFile,
assertEquals(expectedBlockRep, ssInode.getBlockReplication());
// Also check the number derived from INodeFile#getFileReplication
assertEquals(snapshotRepMap.get(ss).shortValue(),
ssInode.getFileReplication(iip.getPathSnapshot()));
ssInode.getFileReplication(iip.getPathSnapshotId()));
}
}

View File

@ -55,6 +55,9 @@ Release 2.4.0 - UNRELEASED
YARN-1028. Added FailoverProxyProvider capability to ResourceManager to help
with RM failover. (Karthik Kambatla via vinodkv)
YARN-1029. Added embedded leader election in the ResourceManager. (Karthik
Kambatla via vinodkv)
IMPROVEMENTS
YARN-7. Support CPU resource for DistributedShell. (Junping Du via llu)
@ -197,6 +200,11 @@ Release 2.4.0 - UNRELEASED
YARN-1493. Changed ResourceManager and Scheduler interfacing to recognize
app-attempts separately from apps. (Jian He via vinodkv)
YARN-1482. Modified WebApplicationProxy to make it work across ResourceManager
fail-over. (Xuan Gong via vinodkv)
YARN-1568. Rename clusterid to clusterId in ActiveRMInfoProto (kasha)
OPTIMIZATIONS
BUG FIXES
@ -292,6 +300,12 @@ Release 2.4.0 - UNRELEASED
YARN-1560. Fixed TestYarnClient#testAMMRTokens failure with null AMRM token.
(Ted Yu via jianhe)
YARN-1409. NonAggregatingLogHandler can throw RejectedExecutionException
(Tsuyoshi OZAWA via jlowe)
YARN-1293. Fixed TestContainerLaunch#testInvalidEnvSyntaxDiagnostics failure
caused by non-English system locale. (Tsuyoshi OZAWA via jianhe)
Release 2.3.0 - UNRELEASED
INCOMPATIBLE CHANGES

View File

@ -51,6 +51,22 @@ public static boolean isHAEnabled(Configuration conf) {
YarnConfiguration.DEFAULT_RM_HA_ENABLED);
}
public static boolean isAutomaticFailoverEnabled(Configuration conf) {
return conf.getBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,
YarnConfiguration.DEFAULT_AUTO_FAILOVER_ENABLED);
}
public static boolean isAutomaticFailoverEnabledAndEmbedded(
Configuration conf) {
return isAutomaticFailoverEnabled(conf) &&
isAutomaticFailoverEmbedded(conf);
}
public static boolean isAutomaticFailoverEmbedded(Configuration conf) {
return conf.getBoolean(YarnConfiguration.AUTO_FAILOVER_EMBEDDED,
YarnConfiguration.DEFAULT_AUTO_FAILOVER_EMBEDDED);
}
/**
* Verify configuration for Resource Manager HA.
* @param conf Configuration
@ -162,8 +178,7 @@ public static Collection<String> getRMHAIds(Configuration conf) {
* @param conf Configuration. Please use verifyAndSetRMHAId to check.
* @return RM Id on success
*/
@VisibleForTesting
static String getRMHAId(Configuration conf) {
public static String getRMHAId(Configuration conf) {
return conf.get(YarnConfiguration.RM_HA_ID);
}
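The automatic-failover predicates added above, together with the now-public getRMHAId, are consumed by AdminService and EmbeddedElectorService later in this diff. A hedged usage sketch (conf being any YarnConfiguration):

// Wire up the embedded elector only when both failover flags are on.
if (HAUtil.isAutomaticFailoverEnabledAndEmbedded(conf)) {
  String rmId = HAUtil.getRMHAId(conf); // identifies this RM among the HA ids
  // ... create an EmbeddedElectorService for this RM
}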

View File

@ -59,7 +59,7 @@ public class YarnConfiguration extends Configuration {
public static final String IPC_PREFIX = YARN_PREFIX + "ipc.";
/** Factory to create client IPC classes.*/
public static final String IPC_CLIENT_FACTORY_CLASS =
public static final String IPC_CLIENT_FACTORY_CLASS =
IPC_PREFIX + "client.factory.class";
public static final String DEFAULT_IPC_CLIENT_FACTORY_CLASS =
"org.apache.hadoop.yarn.factories.impl.pb.RpcClientFactoryPBImpl";
@ -87,6 +87,8 @@ public class YarnConfiguration extends Configuration {
////////////////////////////////
public static final String RM_PREFIX = "yarn.resourcemanager.";
public static final String RM_CLUSTER_ID = RM_PREFIX + "cluster-id";
/** The address of the applications manager interface in the RM.*/
public static final String RM_ADDRESS =
RM_PREFIX + "address";
@ -278,6 +280,36 @@ public class YarnConfiguration extends Configuration {
public static final String RECOVERY_ENABLED = RM_PREFIX + "recovery.enabled";
public static final boolean DEFAULT_RM_RECOVERY_ENABLED = false;
/** Zookeeper interaction configs */
public static final String RM_ZK_PREFIX = RM_PREFIX + "zk-";
public static final String RM_ZK_ADDRESS = RM_ZK_PREFIX + "address";
public static final String RM_ZK_NUM_RETRIES = RM_ZK_PREFIX + "num-retries";
public static final int DEFAULT_ZK_RM_NUM_RETRIES = 500;
public static final String RM_ZK_RETRY_INTERVAL_MS =
RM_ZK_PREFIX + "retry-interval-ms";
public static final long DEFAULT_RM_ZK_RETRY_INTERVAL_MS = 2000;
public static final String RM_ZK_TIMEOUT_MS = RM_ZK_PREFIX + "timeout-ms";
public static final int DEFAULT_RM_ZK_TIMEOUT_MS = 10000;
public static final String RM_ZK_ACL = RM_ZK_PREFIX + "acl";
public static final String DEFAULT_RM_ZK_ACL = "world:anyone:rwcda";
public static final String ZK_STATE_STORE_PREFIX =
RM_PREFIX + "zk-state-store.";
/** Parent znode path under which ZKRMStateStore will create znodes */
public static final String ZK_RM_STATE_STORE_PARENT_PATH =
ZK_STATE_STORE_PREFIX + "parent-path";
public static final String DEFAULT_ZK_RM_STATE_STORE_PARENT_PATH = "/rmstore";
/** Root node ACLs for fencing */
public static final String ZK_RM_STATE_STORE_ROOT_NODE_ACL =
ZK_STATE_STORE_PREFIX + "root-node.acl";
/** HA related configs */
public static final String RM_HA_PREFIX = RM_PREFIX + "ha.";
public static final String RM_HA_ENABLED = RM_HA_PREFIX + "enabled";
@ -296,6 +328,22 @@ public class YarnConfiguration extends Configuration {
HttpConfig.isSecure() ? RM_WEBAPP_HTTPS_ADDRESS
: RM_WEBAPP_ADDRESS));
public static final String AUTO_FAILOVER_PREFIX =
RM_HA_PREFIX + "automatic-failover.";
public static final String AUTO_FAILOVER_ENABLED =
AUTO_FAILOVER_PREFIX + "enabled";
public static final boolean DEFAULT_AUTO_FAILOVER_ENABLED = false;
public static final String AUTO_FAILOVER_EMBEDDED =
AUTO_FAILOVER_PREFIX + "embedded";
public static final boolean DEFAULT_AUTO_FAILOVER_EMBEDDED = false;
public static final String AUTO_FAILOVER_ZK_BASE_PATH =
AUTO_FAILOVER_PREFIX + "zk-base-path";
public static final String DEFAULT_AUTO_FAILOVER_ZK_BASE_PATH =
"/yarn-leader-election";
public static final String CLIENT_FAILOVER_PREFIX =
YARN_PREFIX + "client.failover-";
public static final String CLIENT_FAILOVER_PROXY_PROVIDER =
@ -334,36 +382,6 @@ public class YarnConfiguration extends Configuration {
+ "fs.state-store.retry-policy-spec";
public static final String DEFAULT_FS_RM_STATE_STORE_RETRY_POLICY_SPEC =
"2000, 500";
/**
* Comma separated host:port pairs, each corresponding to a ZK server for
* ZKRMStateStore
*/
public static final String ZK_STATE_STORE_PREFIX =
RM_PREFIX + "zk-state-store.";
public static final String ZK_RM_STATE_STORE_NUM_RETRIES =
ZK_STATE_STORE_PREFIX + "num-retries";
public static final int DEFAULT_ZK_RM_STATE_STORE_NUM_RETRIES = 500;
/** retry interval when connecting to zookeeper*/
public static final String ZK_RM_STATE_STORE_RETRY_INTERVAL_MS =
ZK_STATE_STORE_PREFIX + "retry-interval-ms";
public static final long DEFAULT_ZK_RM_STATE_STORE_RETRY_INTERVAL_MS = 2000;
public static final String ZK_RM_STATE_STORE_ADDRESS =
ZK_STATE_STORE_PREFIX + "address";
/** Timeout in millisec for ZK server connection for ZKRMStateStore */
public static final String ZK_RM_STATE_STORE_TIMEOUT_MS =
ZK_STATE_STORE_PREFIX + "timeout-ms";
public static final int DEFAULT_ZK_RM_STATE_STORE_TIMEOUT_MS = 60000;
/** Parent znode path under which ZKRMStateStore will create znodes */
public static final String ZK_RM_STATE_STORE_PARENT_PATH =
ZK_STATE_STORE_PREFIX + "parent-path";
public static final String DEFAULT_ZK_RM_STATE_STORE_PARENT_PATH = "/rmstore";
/** ACL for znodes in ZKRMStateStore */
public static final String ZK_RM_STATE_STORE_ACL =
ZK_STATE_STORE_PREFIX + "acl";
public static final String DEFAULT_ZK_RM_STATE_STORE_ACL =
"world:anyone:rwcda";
public static final String ZK_RM_STATE_STORE_ROOT_NODE_ACL =
ZK_STATE_STORE_PREFIX + "root-node.acl";
/** The maximum number of completed applications RM keeps. */
public static final String RM_MAX_COMPLETED_APPLICATIONS =

View File

@ -133,3 +133,11 @@ message RMStateVersionProto {
optional int32 major_version = 1;
optional int32 minor_version = 2;
}
//////////////////////////////////////////////////////////////////
///////////// RM Failover related records ////////////////////////
//////////////////////////////////////////////////////////////////
message ActiveRMInfoProto {
required string clusterId = 1;
required string rmId = 2;
}
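EmbeddedElectorService (added later in this diff) serializes this record as the data the active RM publishes through the leader elector. A sketch of building it with the generated protobuf API (field values illustrative):

ActiveRMInfoProto info = ActiveRMInfoProto.newBuilder()
    .setClusterId("yarn-test-cluster") // illustrative cluster id
    .setRmId("rm1")                    // illustrative RM id
    .build();
byte[] activeNodeInfo = info.toByteArray(); // handed to the elector as active data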

View File

@ -30,7 +30,13 @@
</properties>
<dependencies>
<dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-api</artifactId>
</dependency>

View File

@ -24,41 +24,52 @@
import static org.junit.Assert.fail;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.ClientBaseWithFixes;
import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos;
import org.apache.hadoop.service.Service.STATE;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.HAUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.apache.hadoop.yarn.server.resourcemanager.AdminService;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestRMFailover {
public class TestRMFailover extends ClientBaseWithFixes {
private static final Log LOG =
LogFactory.getLog(TestRMFailover.class.getName());
private static final HAServiceProtocol.StateChangeRequestInfo req =
new HAServiceProtocol.StateChangeRequestInfo(
HAServiceProtocol.RequestSource.REQUEST_BY_USER);
private static final String RM1_NODE_ID = "rm1";
private static final int RM1_PORT_BASE = 10000;
private static final String RM2_NODE_ID = "rm2";
private static final int RM2_PORT_BASE = 20000;
private static final HAServiceProtocol.StateChangeRequestInfo req =
new HAServiceProtocol.StateChangeRequestInfo(
HAServiceProtocol.RequestSource.REQUEST_BY_USER_FORCED);
private static Configuration conf;
private static MiniYARNCluster cluster;
private Configuration conf;
private MiniYARNCluster cluster;
private ApplicationId fakeAppId;
private static void setConfForRM(String rmId, String prefix, String value) {
private void setConfForRM(String rmId, String prefix, String value) {
conf.set(HAUtil.addSuffix(prefix, rmId), value);
}
private static void setRpcAddressForRM(String rmId, int base) {
private void setRpcAddressForRM(String rmId, int base) {
setConfForRM(rmId, YarnConfiguration.RM_ADDRESS, "0.0.0.0:" +
(base + YarnConfiguration.DEFAULT_RM_PORT));
setConfForRM(rmId, YarnConfiguration.RM_SCHEDULER_ADDRESS, "0.0.0.0:" +
@ -73,13 +84,9 @@ private static void setRpcAddressForRM(String rmId, int base) {
(base + YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_PORT));
}
private static AdminService getRMAdminService(int index) {
return
cluster.getResourceManager(index).getRMContext().getRMAdminService();
}
@BeforeClass
public static void setup() throws IOException {
@Before
public void setup() throws IOException {
fakeAppId = ApplicationId.newInstance(System.currentTimeMillis(), 0);
conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID);
@ -87,27 +94,22 @@ public static void setup() throws IOException {
setRpcAddressForRM(RM2_NODE_ID, RM2_PORT_BASE);
conf.setLong(YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_BASE_MS, 100L);
conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_USE_RPC, true);
cluster = new MiniYARNCluster(TestRMFailover.class.getName(), 2, 1, 1, 1);
cluster.init(conf);
cluster.start();
cluster.getResourceManager(0).getRMContext().getRMAdminService()
.transitionToActive(req);
assertFalse("RM never turned active", -1 == cluster.getActiveRMIndex());
}
@AfterClass
public static void teardown() {
@After
public void teardown() {
cluster.stop();
}
private void verifyClientConnection() {
int numRetries = 3;
while(numRetries-- > 0) {
Configuration conf = new YarnConfiguration(TestRMFailover.conf);
Configuration conf = new YarnConfiguration(this.conf);
YarnClient client = YarnClient.createYarnClient();
client.init(conf);
client.start();
@ -123,31 +125,131 @@ private void verifyClientConnection() {
fail("Client couldn't connect to the Active RM");
}
private void verifyConnections() throws InterruptedException, YarnException {
assertTrue("NMs failed to connect to the RM",
cluster.waitForNodeManagersToConnect(20000));
verifyClientConnection();
}
private AdminService getAdminService(int index) {
return cluster.getResourceManager(index).getRMContext().getRMAdminService();
}
private void explicitFailover() throws IOException {
int activeRMIndex = cluster.getActiveRMIndex();
int newActiveRMIndex = (activeRMIndex + 1) % 2;
getAdminService(activeRMIndex).transitionToStandby(req);
getAdminService(newActiveRMIndex).transitionToActive(req);
assertEquals("Failover failed", newActiveRMIndex, cluster.getActiveRMIndex());
}
private void failover()
throws IOException, InterruptedException, YarnException {
int activeRMIndex = cluster.getActiveRMIndex();
cluster.stopResourceManager(activeRMIndex);
assertEquals("Failover failed",
(activeRMIndex + 1) % 2, cluster.getActiveRMIndex());
cluster.restartResourceManager(activeRMIndex);
}
@Test
public void testExplicitFailover()
throws YarnException, InterruptedException, IOException {
assertTrue("NMs failed to connect to the RM",
cluster.waitForNodeManagersToConnect(5000));
verifyClientConnection();
conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
cluster.init(conf);
cluster.start();
getAdminService(0).transitionToActive(req);
assertFalse("RM never turned active", -1 == cluster.getActiveRMIndex());
verifyConnections();
// Failover to the second RM
getRMAdminService(0).transitionToStandby(req);
getRMAdminService(1).transitionToActive(req);
assertEquals("Wrong ResourceManager is active",
HAServiceProtocol.HAServiceState.ACTIVE,
getRMAdminService(1).getServiceStatus().getState());
assertTrue("NMs failed to connect to the RM",
cluster.waitForNodeManagersToConnect(5000));
verifyClientConnection();
explicitFailover();
verifyConnections();
// Failover back to the first RM
getRMAdminService(1).transitionToStandby(req);
getRMAdminService(0).transitionToActive(req);
assertEquals("Wrong ResourceManager is active",
HAServiceProtocol.HAServiceState.ACTIVE,
getRMAdminService(0).getServiceStatus().getState());
assertTrue("NMs failed to connect to the RM",
cluster.waitForNodeManagersToConnect(5000));
verifyClientConnection();
explicitFailover();
verifyConnections();
}
@Test
public void testAutomaticFailover()
throws YarnException, InterruptedException, IOException {
conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, true);
conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_EMBEDDED, true);
conf.set(YarnConfiguration.RM_CLUSTER_ID, "yarn-test-cluster");
conf.set(YarnConfiguration.RM_ZK_ADDRESS, hostPort);
conf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS, 2000);
cluster.init(conf);
cluster.start();
assertFalse("RM never turned active", -1 == cluster.getActiveRMIndex());
verifyConnections();
failover();
verifyConnections();
failover();
verifyConnections();
}
@Test
public void testWebAppProxyInStandAloneMode() throws YarnException,
InterruptedException, IOException {
WebAppProxyServer webAppProxyServer = new WebAppProxyServer();
try {
conf.set(YarnConfiguration.PROXY_ADDRESS, "0.0.0.0:9099");
cluster.init(conf);
cluster.start();
getAdminService(0).transitionToActive(req);
assertFalse("RM never turned active", -1 == cluster.getActiveRMIndex());
verifyConnections();
webAppProxyServer.init(conf);
// Start webAppProxyServer
Assert.assertEquals(STATE.INITED, webAppProxyServer.getServiceState());
webAppProxyServer.start();
Assert.assertEquals(STATE.STARTED, webAppProxyServer.getServiceState());
URL wrongUrl = new URL("http://0.0.0.0:9099/proxy/" + fakeAppId);
HttpURLConnection proxyConn = (HttpURLConnection) wrongUrl
.openConnection();
proxyConn.connect();
verifyExpectedException(proxyConn.getResponseMessage());
explicitFailover();
verifyConnections();
proxyConn.connect();
verifyExpectedException(proxyConn.getResponseMessage());
} finally {
webAppProxyServer.stop();
}
}
@Test
public void testEmbeddedWebAppProxy() throws YarnException,
InterruptedException, IOException {
cluster.init(conf);
cluster.start();
getAdminService(0).transitionToActive(req);
assertFalse("RM never turned active", -1 == cluster.getActiveRMIndex());
verifyConnections();
URL wrongUrl = new URL("http://0.0.0.0:18088/proxy/" + fakeAppId);
HttpURLConnection proxyConn = (HttpURLConnection) wrongUrl
.openConnection();
proxyConn.connect();
verifyExpectedException(proxyConn.getResponseMessage());
explicitFailover();
verifyConnections();
proxyConn.connect();
verifyExpectedException(proxyConn.getResponseMessage());
}
private void verifyExpectedException(String exceptionMessage){
assertTrue(exceptionMessage.contains(ApplicationNotFoundException.class
.getName()));
assertTrue(exceptionMessage
.contains("Application with id '" + fakeAppId + "' " +
"doesn't exist in RM."));
}
}

View File

@ -21,16 +21,19 @@
import org.apache.hadoop.ha.BadFencingConfigurationException;
import org.apache.hadoop.ha.HAServiceTarget;
import org.apache.hadoop.ha.NodeFencer;
import org.apache.hadoop.yarn.conf.HAUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import java.io.IOException;
import java.net.InetSocketAddress;
public class RMHAServiceTarget extends HAServiceTarget {
private InetSocketAddress haAdminServiceAddress;
private final boolean autoFailoverEnabled;
private final InetSocketAddress haAdminServiceAddress;
public RMHAServiceTarget(YarnConfiguration conf)
throws IOException {
autoFailoverEnabled = HAUtil.isAutomaticFailoverEnabled(conf);
haAdminServiceAddress = conf.getSocketAddr(
YarnConfiguration.RM_ADMIN_ADDRESS,
YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS,
@ -44,19 +47,23 @@ public InetSocketAddress getAddress() {
@Override
public InetSocketAddress getZKFCAddress() {
// TODO (YARN-1177): Hook up ZKFC information
return null;
// TODO (YARN-1177): ZKFC implementation
throw new UnsupportedOperationException("RMHAServiceTarget doesn't have " +
"a corresponding ZKFC address");
}
@Override
public NodeFencer getFencer() {
// TODO (YARN-1026): Hook up fencing implementation
return null;
}
@Override
public void checkFencingConfigured()
throws BadFencingConfigurationException {
// TODO (YARN-1026): Based on fencing implementation
public void checkFencingConfigured() throws BadFencingConfigurationException {
throw new BadFencingConfigurationException("Fencer not configured");
}
@Override
public boolean isAutoFailoverEnabled() {
return autoFailoverEnabled;
}
}

View File

@ -264,13 +264,20 @@
<property>
<description>Enable RM to recover state after starting. If true, then
yarn.resourcemanager.store.class must be specified</description>
yarn.resourcemanager.store.class must be specified. </description>
<name>yarn.resourcemanager.recovery.enabled</name>
<value>false</value>
</property>
<property>
<description>The class to use as the persistent store.</description>
<description>The class to use as the persistent store.
If org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore
is used, the store is implicitly fenced; meaning a single ResourceManager
is able to use the store at any point in time. More details on this
implicit fencing, along with setting up appropriate ACLs is discussed
under yarn.resourcemanager.zk-state-store.root-node.acl.
</description>
<name>yarn.resourcemanager.store.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore</value>
</property>
@ -291,31 +298,24 @@
</property>
<property>
<description>Host:Port of the ZooKeeper server where RM state will
be stored. This must be supplied when using
org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore
as the value for yarn.resourcemanager.store.class. ZKRMStateStore
is implicitly fenced, meaning a single ResourceManager is
able to use the store at any point in time. More details on this, along
with setting up appropriate ACLs is discussed under the description for
yarn.resourcemanager.zk-state-store.root-node.acl.</description>
<name>yarn.resourcemanager.zk-state-store.address</name>
<description>Host:Port of the ZooKeeper server to be used by the RM. This
must be supplied when using the ZooKeeper based implementation of the
RM state store and/or embedded automatic failover in a HA setting.
</description>
<name>yarn.resourcemanager.zk-address</name>
<!--value>127.0.0.1:2181</value-->
</property>
<property>
<description>Number of times ZKRMStateStore tries to connect to
ZooKeeper. This may be supplied when using
org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore
as the value for yarn.resourcemanager.store.class</description>
<name>yarn.resourcemanager.zk-state-store.num-retries</name>
<description>Number of times RM tries to connect to ZooKeeper.</description>
<name>yarn.resourcemanager.zk-num-retries</name>
<value>500</value>
</property>
<property>
<description>Retry interval in milliseconds when ZKRMStateStore tries to
connect to ZooKeeper.</description>
<name>yarn.resourcemanager.zk-state-store.retry-interval-ms</name>
<description>Retry interval in milliseconds when connecting to ZooKeeper.
</description>
<name>yarn.resourcemanager.zk-retry-interval-ms</name>
<value>2000</value>
</property>
@ -333,20 +333,14 @@
is managed by the ZooKeeper cluster itself, not by the client. This value is
used by the cluster to determine when the client's session expires.
Expiration happens when the cluster does not hear from the client within
the specified session timeout period (i.e. no heartbeat).
This may be supplied when using
org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore
as the value for yarn.resourcemanager.store.class</description>
<name>yarn.resourcemanager.zk-state-store.timeout-ms</name>
the specified session timeout period (i.e. no heartbeat).</description>
<name>yarn.resourcemanager.zk-timeout-ms</name>
<value>60000</value>
</property>
<property>
<description>ACL's to be used for ZooKeeper znodes.
This may be supplied when using
org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore
as the value for yarn.resourcemanager.store.class</description>
<name>yarn.resourcemanager.zk-state-store.acl</name>
<description>ACLs to be used for ZooKeeper znodes.</description>
<name>yarn.resourcemanager.zk-acl</name>
<value>world:anyone:rwcda</value>
</property>
@ -362,7 +356,7 @@
permissions.
By default, when this property is not set, we use the ACLs from
yarn.resourcemanager.zk-state-store.acl for shared admin access and
yarn.resourcemanager.zk-acl for shared admin access and
rm-address:cluster-timestamp for username-based exclusive create-delete
access.
@ -408,6 +402,36 @@
<value>false</value>
</property>
<property>
<description>Enable automatic failover.</description>
<name>yarn.resourcemanager.ha.automatic-failover.enabled</name>
<value>false</value>
</property>
<property>
<description>Enable embedded automatic failover. The embedded elector
relies on the RM state store to handle fencing, and is primarily intended
to be used in conjunction with ZKRMStateStore.</description>
<name>yarn.resourcemanager.ha.automatic-failover.embedded</name>
<value>false</value>
</property>
<property>
<description>The base znode path to use for storing leader information,
when using ZooKeeper based leader election.</description>
<name>yarn.resourcemanager.ha.automatic-failover.zk-base-path</name>
<value>/yarn-leader-election</value>
</property>
<property>
<description>Name of the cluster. In a HA setting,
this is used to ensure the RM participates in leader
election for this cluster and ensures it does not affect
other clusters</description>
<name>yarn.resourcemanager.cluster-id</name>
<!--value>yarn-cluster</value-->
</property>
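Taken together, the properties above enable embedded leader election. A minimal programmatic sketch mirroring TestRMFailover#testAutomaticFailover in this diff (the ZooKeeper quorum string is an illustrative placeholder):

conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, true);
conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_EMBEDDED, true);
conf.set(YarnConfiguration.RM_CLUSTER_ID, "yarn-test-cluster");
conf.set(YarnConfiguration.RM_ZK_ADDRESS, "zk1.example.com:2181"); // placeholder
conf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS, 2000);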
<property>
<description>The list of RM nodes in the cluster when HA is
enabled. See description of yarn.resourcemanager.ha

View File

@ -23,6 +23,7 @@
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.RejectedExecutionException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -118,10 +119,17 @@ public void handle(LogHandlerEvent event) {
LOG.info("Scheduling Log Deletion for application: "
+ appFinishedEvent.getApplicationId() + ", with delay of "
+ this.deleteDelaySeconds + " seconds");
sched.schedule(
LogDeleterRunnable logDeleter =
new LogDeleterRunnable(appOwners.remove(appFinishedEvent
.getApplicationId()), appFinishedEvent.getApplicationId()),
this.deleteDelaySeconds, TimeUnit.SECONDS);
.getApplicationId()), appFinishedEvent.getApplicationId());
try {
sched.schedule(logDeleter, this.deleteDelaySeconds,
TimeUnit.SECONDS);
} catch (RejectedExecutionException e) {
// Handling this event in local thread before starting threads
// or after calling sched.shutdownNow().
logDeleter.run();
}
break;
default:
; // Ignore
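The fix above is a schedule-or-run-inline fallback: if the executor rejects the task (not yet started, or already shut down), the log deleter runs in the caller's thread instead of being dropped. The pattern in isolation, a hedged sketch using the same names as the patch:

try {
  sched.schedule(logDeleter, deleteDelaySeconds, TimeUnit.SECONDS);
} catch (RejectedExecutionException e) {
  // Executor unavailable: perform the deletion inline rather than lose it.
  logDeleter.run();
}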

View File

@ -260,8 +260,12 @@ public void testInvalidEnvSyntaxDiagnostics() throws IOException {
fos.flush();
fos.close();
// It is supposed that LANG is set as C.
Map<String, String> cmdEnv = new HashMap<String, String>();
cmdEnv.put("LANG", "C");
Shell.ShellCommandExecutor shexc
= new Shell.ShellCommandExecutor(new String[]{shellFile.getAbsolutePath()}, tmpDir);
= new Shell.ShellCommandExecutor(new String[]{shellFile.getAbsolutePath()},
tmpDir, cmdEnv);
String diagnostics = null;
try {
shexc.execute();

View File

@ -35,6 +35,7 @@
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.event.DrainDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.event.InlineDispatcher;
import org.apache.hadoop.yarn.logaggregation.ContainerLogsRetentionPolicy;
import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
@ -202,6 +203,32 @@ public void testStop() throws Exception {
verify(logHandler.mockSched).shutdownNow();
}
@Test
public void testHandlingApplicationFinishedEvent() {
Configuration conf = new Configuration();
LocalDirsHandlerService dirsService = new LocalDirsHandlerService();
DeletionService delService = new DeletionService(null);
NonAggregatingLogHandler aggregatingLogHandler =
new NonAggregatingLogHandler(new InlineDispatcher(),
delService,
dirsService);
dirsService.init(conf);
dirsService.start();
delService.init(conf);
delService.start();
aggregatingLogHandler.init(conf);
aggregatingLogHandler.start();
ApplicationId appId = BuilderUtils.newApplicationId(1234, 1);
// It should NOT throw RejectedExecutionException
aggregatingLogHandler.handle(new LogHandlerAppFinishedEvent(appId));
aggregatingLogHandler.stop();
// It should NOT throw RejectedExecutionException after stopping
// handler service.
aggregatingLogHandler.handle(new LogHandlerAppFinishedEvent(appId));
}
private class NonAggregatingLogHandlerWithMockExecutor extends
NonAggregatingLogHandler {

View File

@ -20,11 +20,8 @@
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -41,14 +38,16 @@
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RPC.Server;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.ResourceOption;
import org.apache.hadoop.yarn.conf.HAUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.factories.RecordFactory;
@ -75,7 +74,7 @@
import com.google.protobuf.BlockingService;
public class AdminService extends AbstractService implements
public class AdminService extends CompositeService implements
HAServiceProtocol, ResourceManagerAdministrationProtocol {
private static final Log LOG = LogFactory.getLog(AdminService.class);
@ -84,6 +83,8 @@ public class AdminService extends AbstractService implements
private final ResourceManager rm;
private String rmId;
private boolean autoFailoverEnabled;
private Server server;
private InetSocketAddress masterServiceAddress;
private AccessControlList adminAcl;
@ -99,6 +100,15 @@ public AdminService(ResourceManager rm, RMContext rmContext) {
@Override
public synchronized void serviceInit(Configuration conf) throws Exception {
if (rmContext.isHAEnabled()) {
autoFailoverEnabled = HAUtil.isAutomaticFailoverEnabled(conf);
if (autoFailoverEnabled) {
if (HAUtil.isAutomaticFailoverEmbedded(conf)) {
addIfService(createEmbeddedElectorService());
}
}
}
masterServiceAddress = conf.getSocketAddr(
YarnConfiguration.RM_ADMIN_ADDRESS,
YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS,
@ -162,6 +172,10 @@ protected void stopServer() throws Exception {
}
}
protected EmbeddedElectorService createEmbeddedElectorService() {
return new EmbeddedElectorService(rmContext);
}
private UserGroupInformation checkAccess(String method) throws IOException {
return RMServerUtils.verifyAccess(adminAcl, method, LOG);
}
@ -174,6 +188,43 @@ private UserGroupInformation checkAcls(String method) throws YarnException {
}
}
/**
* Check that a request to change this node's HA state is valid.
* In particular, verifies that, if automatic failover is enabled, non-forced
* requests from the HAAdmin CLI are rejected, and that requests from the ZK
* failover controller are rejected when automatic failover is disabled.
*
* @param req the request to check
* @throws AccessControlException if the request is disallowed
*/
private void checkHaStateChange(StateChangeRequestInfo req)
throws AccessControlException {
switch (req.getSource()) {
case REQUEST_BY_USER:
if (autoFailoverEnabled) {
throw new AccessControlException(
"Manual failover for this ResourceManager is disallowed, " +
"because automatic failover is enabled.");
}
break;
case REQUEST_BY_USER_FORCED:
if (autoFailoverEnabled) {
LOG.warn("Allowing manual failover from " +
org.apache.hadoop.ipc.Server.getRemoteAddress() +
" even though automatic failover is enabled, because the user " +
"specified the force flag");
}
break;
case REQUEST_BY_ZKFC:
if (!autoFailoverEnabled) {
throw new AccessControlException(
"Request from ZK failover controller at " +
org.apache.hadoop.ipc.Server.getRemoteAddress() + " denied " +
"since automatic failover is not enabled");
}
break;
}
}
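TestRMHA below exercises exactly this policy: with automatic failover enabled, only forced user requests (or ZKFC requests) may change state. A hedged sketch of a forced manual transition:

StateChangeRequestInfo forced = new StateChangeRequestInfo(
    HAServiceProtocol.RequestSource.REQUEST_BY_USER_FORCED);
adminService.transitionToStandby(forced); // allowed, but logs a warning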
private synchronized boolean isRMActive() {
return HAServiceState.ACTIVE == rmContext.getHAServiceState();
}
@ -196,8 +247,7 @@ public synchronized void monitorHealth()
public synchronized void transitionToActive(
HAServiceProtocol.StateChangeRequestInfo reqInfo) throws IOException {
UserGroupInformation user = checkAccess("transitionToActive");
// TODO (YARN-1177): When automatic failover is enabled,
// check if transition should be allowed for this request
checkHaStateChange(reqInfo);
try {
rm.transitionToActive();
RMAuditLogger.logSuccess(user.getShortUserName(),
@ -215,8 +265,7 @@ public synchronized void transitionToActive(
public synchronized void transitionToStandby(
HAServiceProtocol.StateChangeRequestInfo reqInfo) throws IOException {
UserGroupInformation user = checkAccess("transitionToStandby");
// TODO (YARN-1177): When automatic failover is enabled,
// check if transition should be allowed for this request
checkHaStateChange(reqInfo);
try {
rm.transitionToStandby(true);
RMAuditLogger.logSuccess(user.getShortUserName(),
@ -394,7 +443,7 @@ void refreshServiceAcls(Configuration configuration,
public String[] getGroupsForUser(String user) throws IOException {
return UserGroupInformation.createRemoteUser(user).getGroupNames();
}
@Override
public UpdateNodeResourceResponse updateNodeResource(
UpdateNodeResourceRequest request) throws YarnException, IOException {

View File

@ -0,0 +1,209 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.ActiveStandbyElector;
import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ZKUtil;
import org.apache.hadoop.yarn.conf.HAUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.data.ACL;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class EmbeddedElectorService extends AbstractService
implements ActiveStandbyElector.ActiveStandbyElectorCallback {
private static final Log LOG =
LogFactory.getLog(EmbeddedElectorService.class.getName());
private static final HAServiceProtocol.StateChangeRequestInfo req =
new HAServiceProtocol.StateChangeRequestInfo(
HAServiceProtocol.RequestSource.REQUEST_BY_ZKFC);
private RMContext rmContext;
private byte[] localActiveNodeInfo;
private ActiveStandbyElector elector;
EmbeddedElectorService(RMContext rmContext) {
super(EmbeddedElectorService.class.getName());
this.rmContext = rmContext;
}
@Override
protected synchronized void serviceInit(Configuration conf)
throws Exception {
conf = conf instanceof YarnConfiguration ? conf : new YarnConfiguration(conf);
String zkQuorum = conf.get(YarnConfiguration.RM_ZK_ADDRESS);
if (zkQuorum == null) {
throw new YarnRuntimeException("Embedded automatic failover " +
"is enabled, but " + YarnConfiguration.RM_ZK_ADDRESS +
" is not set");
}
String rmId = HAUtil.getRMHAId(conf);
String clusterId = conf.get(YarnConfiguration.RM_CLUSTER_ID);
if (clusterId == null) {
throw new YarnRuntimeException(YarnConfiguration.RM_CLUSTER_ID +
" is not specified!");
}
localActiveNodeInfo = createActiveNodeInfo(clusterId, rmId);
String zkBasePath = conf.get(YarnConfiguration.AUTO_FAILOVER_ZK_BASE_PATH,
YarnConfiguration.DEFAULT_AUTO_FAILOVER_ZK_BASE_PATH);
String electionZNode = zkBasePath + "/" + clusterId;
long zkSessionTimeout = conf.getLong(YarnConfiguration.RM_ZK_TIMEOUT_MS,
YarnConfiguration.DEFAULT_RM_ZK_TIMEOUT_MS);
String zkAclConf = conf.get(YarnConfiguration.RM_ZK_ACL,
YarnConfiguration.DEFAULT_RM_ZK_ACL);
List<ACL> zkAcls;
try {
zkAcls = ZKUtil.parseACLs(ZKUtil.resolveConfIndirection(zkAclConf));
} catch (ZKUtil.BadAclFormatException bafe) {
throw new YarnRuntimeException(
YarnConfiguration.RM_ZK_ACL + " has ill-formatted ACLs");
}
// TODO (YARN-1528): ZKAuthInfo to be set for rm-store and elector
List<ZKUtil.ZKAuthInfo> zkAuths = Collections.emptyList();
elector = new ActiveStandbyElector(zkQuorum, (int) zkSessionTimeout,
electionZNode, zkAcls, zkAuths, this);
elector.ensureParentZNode();
if (!isParentZnodeSafe(clusterId)) {
notifyFatalError(electionZNode + " znode has invalid data! "+
"Might need formatting!");
}
super.serviceInit(conf);
}
@Override
protected synchronized void serviceStart() throws Exception {
elector.joinElection(localActiveNodeInfo);
super.serviceStart();
}
@Override
protected synchronized void serviceStop() throws Exception {
elector.quitElection(false);
elector.terminateConnection();
super.serviceStop();
}
@Override
public synchronized void becomeActive() throws ServiceFailedException {
try {
rmContext.getRMAdminService().transitionToActive(req);
} catch (Exception e) {
throw new ServiceFailedException("RM could not transition to Active", e);
}
}
@Override
public synchronized void becomeStandby() {
try {
rmContext.getRMAdminService().transitionToStandby(req);
} catch (Exception e) {
LOG.error("RM could not transition to Standby", e);
}
}
@Override
public void enterNeutralMode() {
/**
* Possibly due to transient connection issues. Do nothing.
* TODO: Might want to keep track of how long in this state and transition
* to standby.
*/
}
@SuppressWarnings(value = "unchecked")
@Override
public synchronized void notifyFatalError(String errorMessage) {
rmContext.getDispatcher().getEventHandler().handle(
new RMFatalEvent(RMFatalEventType.EMBEDDED_ELECTOR_FAILED, errorMessage));
}
@Override
public synchronized void fenceOldActive(byte[] oldActiveData) {
if (LOG.isDebugEnabled()) {
LOG.debug("Request to fence old active being ignored, " +
"as embedded leader election doesn't support fencing");
}
}
private static byte[] createActiveNodeInfo(String clusterId, String rmId)
throws IOException {
return YarnServerResourceManagerServiceProtos.ActiveRMInfoProto
.newBuilder()
.setClusterId(clusterId)
.setRmId(rmId)
.build()
.toByteArray();
}
private synchronized boolean isParentZnodeSafe(String clusterId)
throws InterruptedException, IOException, KeeperException {
byte[] data;
try {
data = elector.getActiveData();
} catch (ActiveStandbyElector.ActiveNotFoundException e) {
// no active found, parent znode is safe
return true;
}
YarnServerResourceManagerServiceProtos.ActiveRMInfoProto proto;
try {
proto = YarnServerResourceManagerServiceProtos.ActiveRMInfoProto
.parseFrom(data);
} catch (InvalidProtocolBufferException e) {
LOG.error("Invalid data in ZK: " + StringUtils.byteToHexString(data));
return false;
}
// Check if the passed proto corresponds to an RM in the same cluster
if (!proto.getClusterId().equals(clusterId)) {
LOG.error("Mismatched cluster! The other RM seems " +
"to be from a different cluster. Current cluster = " + clusterId +
"Other RM's cluster = " + proto.getClusterId());
return false;
}
return true;
}
}

View File

@ -0,0 +1,37 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.event.AbstractEvent;
public class RMFatalEvent extends AbstractEvent<RMFatalEventType> {
private String cause;
public RMFatalEvent(RMFatalEventType rmFatalEventType, String cause) {
super(rmFatalEventType);
this.cause = cause;
}
public RMFatalEvent(RMFatalEventType rmFatalEventType, Exception cause) {
super(rmFatalEventType);
this.cause = StringUtils.stringifyException(cause);
}
public String getCause() {return this.cause;}
}
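The ResourceManager registers a dispatcher for this event type, and the state store and embedded elector raise it on fatal failures. A sketch combining the two ends (names as in the surrounding diff; the cause string is illustrative):

rmDispatcher.register(RMFatalEventType.class,
    new ResourceManager.RMFatalEventDispatcher(rmContext, rm));
rmDispatcher.getEventHandler().handle(
    new RMFatalEvent(RMFatalEventType.STATE_STORE_FENCED, "store fenced"));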

View File

@ -15,9 +15,17 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.recovery;
public enum RMStateStoreOperationFailedEventType {
FENCED, // Store operation failed because it was fenced
FAILED // Store operation failed for no known reason
package org.apache.hadoop.yarn.server.resourcemanager;
import org.apache.hadoop.classification.InterfaceAudience;
@InterfaceAudience.Private
public enum RMFatalEventType {
// Source <- Store
STATE_STORE_FENCED,
STATE_STORE_OP_FAILED,
// Source <- Embedded Elector
EMBEDDED_ELECTOR_FAILED
}

View File

@ -59,8 +59,6 @@
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStoreFactory;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStoreOperationFailedEvent;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStoreOperationFailedEventType;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
@ -121,6 +119,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
*/
@VisibleForTesting
protected RMContextImpl rmContext;
private Dispatcher rmDispatcher;
@VisibleForTesting
protected AdminService adminService;
@ -134,7 +133,6 @@ public class ResourceManager extends CompositeService implements Recoverable {
*/
protected RMActiveServices activeServices;
protected RMSecretManagerService rmSecretManagerService;
private Dispatcher rmDispatcher;
protected ResourceScheduler scheduler;
private ClientRMService clientRM;
@ -149,9 +147,12 @@ public class ResourceManager extends CompositeService implements Recoverable {
protected QueueACLsManager queueACLsManager;
private DelegationTokenRenewer delegationTokenRenewer;
private WebApp webApp;
private AppReportFetcher fetcher = null;
protected ResourceTrackerService resourceTracker;
private boolean recoveryEnabled;
private String webAppAddress;
/** End of Active services */
private Configuration conf;
@ -179,6 +180,13 @@ protected void serviceInit(Configuration conf) throws Exception {
this.conf = conf;
this.rmContext = new RMContextImpl();
rmDispatcher = createDispatcher();
addIfService(rmDispatcher);
rmContext.setDispatcher(rmDispatcher);
rmDispatcher.register(RMFatalEventType.class,
new ResourceManager.RMFatalEventDispatcher(this.rmContext, this));
adminService = createAdminService();
addService(adminService);
rmContext.setRMAdminService(adminService);
@ -189,6 +197,8 @@ protected void serviceInit(Configuration conf) throws Exception {
}
createAndInitActiveServices();
webAppAddress = WebAppUtils.getRMWebAppURLWithoutScheme(conf);
super.serviceInit(conf);
}
@ -207,11 +217,6 @@ protected EventHandler<SchedulerEvent> createSchedulerEventDispatcher() {
return new SchedulerEventDispatcher(this.scheduler);
}
protected RMStateStoreOperationFailedEventDispatcher
createRMStateStoreOperationFailedEventDispatcher() {
return new RMStateStoreOperationFailedEventDispatcher(rmContext, this);
}
protected Dispatcher createDispatcher() {
return new AsyncDispatcher();
}
@ -297,10 +302,6 @@ class RMActiveServices extends CompositeService {
protected void serviceInit(Configuration configuration) throws Exception {
conf.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, true);
rmDispatcher = createDispatcher();
addIfService(rmDispatcher);
rmContext.setDispatcher(rmDispatcher);
rmSecretManagerService = createRMSecretManagerService();
addService(rmSecretManagerService);
@ -332,8 +333,6 @@ protected void serviceInit(Configuration configuration) throws Exception {
try {
rmStore.init(conf);
rmStore.setRMDispatcher(rmDispatcher);
rmDispatcher.register(RMStateStoreOperationFailedEventType.class,
createRMStateStoreOperationFailedEventDispatcher());
} catch (Exception e) {
// the Exception from stateStore.init() needs to be handled for
// HA and we need to give up master status if we got fenced
@ -443,22 +442,12 @@ protected void serviceStart() throws Exception {
throw e;
}
}
startWepApp();
if (getConfig().getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
int port = webApp.port();
WebAppUtils.setRMWebAppPort(conf, port);
}
super.serviceStart();
}
@Override
protected void serviceStop() throws Exception {
if (webApp != null) {
webApp.stop();
}
DefaultMetricsSystem.shutdown();
@ -605,26 +594,23 @@ public void handle(SchedulerEvent event) {
}
@Private
public static class RMStateStoreOperationFailedEventDispatcher implements
EventHandler<RMStateStoreOperationFailedEvent> {
public static class RMFatalEventDispatcher
implements EventHandler<RMFatalEvent> {
private final RMContext rmContext;
private final ResourceManager rm;
public RMStateStoreOperationFailedEventDispatcher(RMContext rmContext,
ResourceManager resourceManager) {
public RMFatalEventDispatcher(
RMContext rmContext, ResourceManager resourceManager) {
this.rmContext = rmContext;
this.rm = resourceManager;
}
@Override
public void handle(RMStateStoreOperationFailedEvent event) {
if (LOG.isDebugEnabled()) {
LOG.debug("Received a " +
RMStateStoreOperationFailedEvent.class.getName() + " of type " +
event.getType().name());
}
if (event.getType() == RMStateStoreOperationFailedEventType.FENCED) {
public void handle(RMFatalEvent event) {
LOG.fatal("Received a " + RMFatalEvent.class.getName() + " of type " +
event.getType().name());
if (event.getType() == RMFatalEventType.STATE_STORE_FENCED) {
LOG.info("RMStateStore has been fenced");
if (rmContext.isHAEnabled()) {
try {
@ -633,14 +619,11 @@ public void handle(RMStateStoreOperationFailedEvent event) {
rm.transitionToStandby(true);
return;
} catch (Exception e) {
LOG.error("Failed to transition RM to Standby mode.");
LOG.fatal("Failed to transition RM to Standby mode.");
}
}
}
LOG.error("Shutting down RM on receiving a " +
RMStateStoreOperationFailedEvent.class.getName() + " of type " +
event.getType().name());
ExitUtil.terminate(1, event.getCause());
}
}
@ -764,12 +747,16 @@ protected void startWepApp() {
YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY)
.withHttpSpnegoKeytabKey(
YarnConfiguration.RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY)
.at(WebAppUtils.getRMWebAppURLWithoutScheme(conf));
.at(webAppAddress);
String proxyHostAndPort = WebAppUtils.getProxyHostAndPort(conf);
if(WebAppUtils.getResolvedRMWebAppURLWithoutScheme(conf).
equals(proxyHostAndPort)) {
AppReportFetcher fetcher = new AppReportFetcher(conf, getClientRMService());
builder.withServlet(ProxyUriUtils.PROXY_SERVLET_NAME,
if (HAUtil.isHAEnabled(conf)) {
fetcher = new AppReportFetcher(conf);
} else {
fetcher = new AppReportFetcher(conf, getClientRMService());
}
builder.withServlet(ProxyUriUtils.PROXY_SERVLET_NAME,
ProxyUriUtils.PROXY_PATH_SPEC, WebAppProxyServlet.class);
builder.withAttribute(WebAppProxy.FETCHER_ATTRIBUTE, fetcher);
String[] proxyParts = proxyHostAndPort.split(":");
@ -866,6 +853,11 @@ protected void serviceStart() throws Exception {
transitionToActive();
}
startWepApp();
if (getConfig().getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
int port = webApp.port();
WebAppUtils.setRMWebAppPort(conf, port);
}
super.serviceStart();
}
@ -876,6 +868,12 @@ protected void doSecureLogin() throws IOException {
@Override
protected void serviceStop() throws Exception {
if (webApp != null) {
webApp.stop();
}
if (fetcher != null) {
fetcher.stop();
}
super.serviceStop();
transitionToStandby(false);
rmContext.setHAServiceState(HAServiceState.STOPPING);

View File

@ -48,6 +48,8 @@
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
import org.apache.hadoop.yarn.server.resourcemanager.RMFatalEvent;
import org.apache.hadoop.yarn.server.resourcemanager.RMFatalEventType;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationAttemptStateDataPBImpl;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationStateDataPBImpl;
@ -679,15 +681,13 @@ protected void handleStoreEvent(RMStateStoreEvent event) {
* @param failureCause the exception due to which the operation failed
*/
private void notifyStoreOperationFailed(Exception failureCause) {
RMStateStoreOperationFailedEventType type;
RMFatalEventType type;
if (failureCause instanceof StoreFencedException) {
type = RMStateStoreOperationFailedEventType.FENCED;
type = RMFatalEventType.STATE_STORE_FENCED;
} else {
type = RMStateStoreOperationFailedEventType.FAILED;
type = RMFatalEventType.STATE_STORE_OP_FAILED;
}
rmDispatcher.getEventHandler().handle(
new RMStateStoreOperationFailedEvent(type, failureCause));
rmDispatcher.getEventHandler().handle(new RMFatalEvent(type, failureCause));
}
@SuppressWarnings("unchecked")

View File

@ -1,36 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.recovery;
import org.apache.hadoop.yarn.event.AbstractEvent;
public class RMStateStoreOperationFailedEvent
extends AbstractEvent<RMStateStoreOperationFailedEventType> {
private Exception cause;
RMStateStoreOperationFailedEvent(
RMStateStoreOperationFailedEventType type, Exception cause) {
super(type);
this.cause = cause;
}
public Exception getCause() {
return this.cause;
}
}

View File

@ -182,34 +182,34 @@ protected List<ACL> constructZkRootNodeACL(
@Override
public synchronized void initInternal(Configuration conf) throws Exception {
zkHostPort = conf.get(YarnConfiguration.ZK_RM_STATE_STORE_ADDRESS);
zkHostPort = conf.get(YarnConfiguration.RM_ZK_ADDRESS);
if (zkHostPort == null) {
throw new YarnRuntimeException("No server address specified for " +
"zookeeper state store for Resource Manager recovery. " +
YarnConfiguration.ZK_RM_STATE_STORE_ADDRESS + " is not configured.");
YarnConfiguration.RM_ZK_ADDRESS + " is not configured.");
}
numRetries =
conf.getInt(YarnConfiguration.ZK_RM_STATE_STORE_NUM_RETRIES,
YarnConfiguration.DEFAULT_ZK_RM_STATE_STORE_NUM_RETRIES);
conf.getInt(YarnConfiguration.RM_ZK_NUM_RETRIES,
YarnConfiguration.DEFAULT_ZK_RM_NUM_RETRIES);
znodeWorkingPath =
conf.get(YarnConfiguration.ZK_RM_STATE_STORE_PARENT_PATH,
YarnConfiguration.DEFAULT_ZK_RM_STATE_STORE_PARENT_PATH);
zkSessionTimeout =
conf.getInt(YarnConfiguration.ZK_RM_STATE_STORE_TIMEOUT_MS,
YarnConfiguration.DEFAULT_ZK_RM_STATE_STORE_TIMEOUT_MS);
conf.getInt(YarnConfiguration.RM_ZK_TIMEOUT_MS,
YarnConfiguration.DEFAULT_RM_ZK_TIMEOUT_MS);
zkRetryInterval =
conf.getLong(YarnConfiguration.ZK_RM_STATE_STORE_RETRY_INTERVAL_MS,
YarnConfiguration.DEFAULT_ZK_RM_STATE_STORE_RETRY_INTERVAL_MS);
conf.getLong(YarnConfiguration.RM_ZK_RETRY_INTERVAL_MS,
YarnConfiguration.DEFAULT_RM_ZK_RETRY_INTERVAL_MS);
// Parse authentication from configuration.
String zkAclConf =
conf.get(YarnConfiguration.ZK_RM_STATE_STORE_ACL,
YarnConfiguration.DEFAULT_ZK_RM_STATE_STORE_ACL);
conf.get(YarnConfiguration.RM_ZK_ACL,
YarnConfiguration.DEFAULT_RM_ZK_ACL);
zkAclConf = ZKUtil.resolveConfIndirection(zkAclConf);
try {
zkAcl = ZKUtil.parseACLs(zkAclConf);
} catch (ZKUtil.BadAclFormatException bafe) {
LOG.error("Invalid format for " + YarnConfiguration.ZK_RM_STATE_STORE_ACL);
LOG.error("Invalid format for " + YarnConfiguration.RM_ZK_ACL);
throw bafe;
}
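With the renamed keys, pointing the RM at a ZooKeeper-backed store reduces to a handful of settings; a hedged sketch matching the test changes below (the quorum string is a placeholder):

conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
conf.set(YarnConfiguration.RM_STORE, ZKRMStateStore.class.getName());
conf.set(YarnConfiguration.RM_ZK_ADDRESS, "zk1.example.com:2181"); // placeholder
conf.setInt(YarnConfiguration.RM_ZK_NUM_RETRIES, 500);
conf.setLong(YarnConfiguration.RM_ZK_RETRY_INTERVAL_MS, 2000L);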

View File

@ -396,6 +396,11 @@ protected void startServer() {
protected void stopServer() {
// don't do anything
}
@Override
protected EmbeddedElectorService createEmbeddedElectorService() {
return null;
}
};
}

View File

@ -25,6 +25,7 @@
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.ha.HealthCheckFailedException;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.conf.HAUtil;
import org.junit.Before;
@ -39,6 +40,7 @@
public class TestRMHA {
private Log LOG = LogFactory.getLog(TestRMHA.class);
private final Configuration configuration = new YarnConfiguration();
private MockRM rm = null;
private static final String STATE_ERR =
"ResourceManager is in wrong HA state";
@ -51,17 +53,13 @@ public class TestRMHA {
@Before
public void setUp() throws Exception {
Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID);
configuration.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
configuration.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID);
for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
conf.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS);
conf.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS);
configuration.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS);
configuration.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS);
}
conf.set(YarnConfiguration.RM_HA_ID, RM1_NODE_ID);
rm = new MockRM(conf);
rm.init(conf);
configuration.set(YarnConfiguration.RM_HA_ID, RM1_NODE_ID);
}
private void checkMonitorHealth() throws IOException {
@ -113,6 +111,9 @@ private void checkActiveRMFunctionality() throws IOException {
*/
@Test (timeout = 30000)
public void testStartAndTransitions() throws IOException {
Configuration conf = new YarnConfiguration(configuration);
rm = new MockRM(conf);
rm.init(conf);
StateChangeRequestInfo requestInfo = new StateChangeRequestInfo(
HAServiceProtocol.RequestSource.REQUEST_BY_USER);
@ -162,4 +163,63 @@ public void testStartAndTransitions() throws IOException {
rm.areActiveServicesRunning());
checkMonitorHealth();
}
@Test
public void testTransitionsWhenAutomaticFailoverEnabled() throws IOException {
final String ERR_UNFORCED_REQUEST = "User request succeeded even when " +
"automatic failover is enabled";
Configuration conf = new YarnConfiguration(configuration);
conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, true);
rm = new MockRM(conf);
rm.init(conf);
rm.start();
StateChangeRequestInfo requestInfo = new StateChangeRequestInfo(
HAServiceProtocol.RequestSource.REQUEST_BY_USER);
// Transition to standby
try {
rm.adminService.transitionToStandby(requestInfo);
fail(ERR_UNFORCED_REQUEST);
} catch (AccessControlException e) {
// expected
}
checkMonitorHealth();
checkStandbyRMFunctionality();
// Transition to active
try {
rm.adminService.transitionToActive(requestInfo);
fail(ERR_UNFORCED_REQUEST);
} catch (AccessControlException e) {
// expected
}
checkMonitorHealth();
checkStandbyRMFunctionality();
final String ERR_FORCED_REQUEST = "Forced request by user should work " +
"even if automatic failover is enabled";
requestInfo = new StateChangeRequestInfo(
HAServiceProtocol.RequestSource.REQUEST_BY_USER_FORCED);
// Transition to standby
try {
rm.adminService.transitionToStandby(requestInfo);
} catch (AccessControlException e) {
fail(ERR_FORCED_REQUEST);
}
checkMonitorHealth();
checkStandbyRMFunctionality();
// Transition to active
try {
rm.adminService.transitionToActive(requestInfo);
} catch (AccessControlException e) {
fail(ERR_FORCED_REQUEST);
}
checkMonitorHealth();
checkActiveRMFunctionality();
}
}
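Condensed, the behavior the new test pins down: with AUTO_FAILOVER_ENABLED set, the admin service rejects plain user transition requests and honors only forced ones (rm set up as in the test above):
StateChangeRequestInfo unforced = new StateChangeRequestInfo(
    HAServiceProtocol.RequestSource.REQUEST_BY_USER);
// rm.adminService.transitionToActive(unforced) throws AccessControlException
StateChangeRequestInfo forced = new StateChangeRequestInfo(
    HAServiceProtocol.RequestSource.REQUEST_BY_USER_FORCED);
rm.adminService.transitionToActive(forced);  // honored even with automatic failover on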

View File

@ -92,7 +92,7 @@ public String getAppNode(String appId) {
public RMStateStore getRMStateStore() throws Exception {
YarnConfiguration conf = new YarnConfiguration();
workingZnode = "/Test";
conf.set(YarnConfiguration.ZK_RM_STATE_STORE_ADDRESS, hostPort);
conf.set(YarnConfiguration.RM_ZK_ADDRESS, hostPort);
conf.set(YarnConfiguration.ZK_RM_STATE_STORE_PARENT_PATH, workingZnode);
this.client = createClient();
this.store = new TestZKRMStateStoreInternal(conf, workingZnode);
@ -140,7 +140,7 @@ private Configuration createHARMConf(
conf.set(YarnConfiguration.RM_HA_IDS, rmIds);
conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
conf.set(YarnConfiguration.RM_STORE, ZKRMStateStore.class.getName());
conf.set(YarnConfiguration.ZK_RM_STATE_STORE_ADDRESS, hostPort);
conf.set(YarnConfiguration.RM_ZK_ADDRESS, hostPort);
conf.set(YarnConfiguration.RM_HA_ID, rmId);
for (String rpcAddress : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
for (String id : HAUtil.getRMHAIds(conf)) {

View File

@ -107,7 +107,7 @@ public void process(WatchedEvent event) {
public RMStateStore getRMStateStore(Configuration conf) throws Exception {
String workingZnode = "/Test";
conf.set(YarnConfiguration.ZK_RM_STATE_STORE_ADDRESS, hostPort);
conf.set(YarnConfiguration.RM_ZK_ADDRESS, hostPort);
conf.set(YarnConfiguration.ZK_RM_STATE_STORE_PARENT_PATH, workingZnode);
watcher = new TestForwardingWatcher();
this.store = new TestZKRMStateStore(conf, workingZnode);
@ -120,8 +120,8 @@ public void testZKClientRetry() throws Exception {
TestZKClient zkClientTester = new TestZKClient();
final String path = "/test";
YarnConfiguration conf = new YarnConfiguration();
conf.setInt(YarnConfiguration.ZK_RM_STATE_STORE_TIMEOUT_MS, 1000);
conf.setLong(YarnConfiguration.ZK_RM_STATE_STORE_RETRY_INTERVAL_MS, 100);
conf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS, 1000);
conf.setLong(YarnConfiguration.RM_ZK_RETRY_INTERVAL_MS, 100);
final ZKRMStateStore store =
(ZKRMStateStore) zkClientTester.getRMStateStore(conf);
TestDispatcher dispatcher = new TestDispatcher();
@ -153,7 +153,7 @@ public void testZKClientDisconnectAndReconnect()
TestZKClient zkClientTester = new TestZKClient();
String path = "/test";
YarnConfiguration conf = new YarnConfiguration();
conf.setInt(YarnConfiguration.ZK_RM_STATE_STORE_TIMEOUT_MS, 100);
conf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS, 100);
ZKRMStateStore store =
(ZKRMStateStore) zkClientTester.getRMStateStore(conf);
TestDispatcher dispatcher = new TestDispatcher();
@ -195,7 +195,7 @@ public void testZKSessionTimeout() throws Exception {
TestZKClient zkClientTester = new TestZKClient();
String path = "/test";
YarnConfiguration conf = new YarnConfiguration();
conf.setInt(YarnConfiguration.ZK_RM_STATE_STORE_TIMEOUT_MS, 100);
conf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS, 100);
ZKRMStateStore store =
(ZKRMStateStore) zkClientTester.getRMStateStore(conf);
TestDispatcher dispatcher = new TestDispatcher();
@ -227,7 +227,7 @@ public void testZKSessionTimeout() throws Exception {
public void testSetZKAcl() {
TestZKClient zkClientTester = new TestZKClient();
YarnConfiguration conf = new YarnConfiguration();
conf.set(YarnConfiguration.ZK_RM_STATE_STORE_ACL, "world:anyone:rwca");
conf.set(YarnConfiguration.RM_ZK_ACL, "world:anyone:rwca");
try {
zkClientTester.store.zkClient.delete(zkClientTester.store
.znodeWorkingPath, -1);
@ -240,7 +240,7 @@ public void testSetZKAcl() {
public void testInvalidZKAclConfiguration() {
TestZKClient zkClientTester = new TestZKClient();
YarnConfiguration conf = new YarnConfiguration();
conf.set(YarnConfiguration.ZK_RM_STATE_STORE_ACL, "randomstring&*");
conf.set(YarnConfiguration.RM_ZK_ACL, "randomstring&*");
try {
zkClientTester.getRMStateStore(conf);
fail("ZKRMStateStore created with bad ACL");

View File

@ -58,6 +58,7 @@
import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdater;
import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl;
import org.apache.hadoop.yarn.server.resourcemanager.RMFatalEvent;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
@ -98,6 +99,7 @@ public class MiniYARNCluster extends CompositeService {
private boolean useFixedPorts;
private boolean useRpc = false;
private int failoverTimeout;
private ConcurrentMap<ApplicationAttemptId, Long> appMasters =
new ConcurrentHashMap<ApplicationAttemptId, Long>(16, 0.75f, 2);
@ -189,12 +191,15 @@ public void serviceInit(Configuration conf) throws Exception {
YarnConfiguration.DEFAULT_YARN_MINICLUSTER_FIXED_PORTS);
useRpc = conf.getBoolean(YarnConfiguration.YARN_MINICLUSTER_USE_RPC,
YarnConfiguration.DEFAULT_YARN_MINICLUSTER_USE_RPC);
failoverTimeout = conf.getInt(YarnConfiguration.RM_ZK_TIMEOUT_MS,
YarnConfiguration.DEFAULT_RM_ZK_TIMEOUT_MS);
if (useRpc && !useFixedPorts) {
throw new YarnRuntimeException("Invalid configuration!" +
" Minicluster can use rpc only when configured to use fixed ports");
}
conf.setBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, true);
if (resourceManagers.length > 1) {
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
if (conf.get(YarnConfiguration.RM_HA_IDS) == null) {
@ -218,6 +223,13 @@ protected void doSecureLogin() throws IOException {
// Don't try to login using keytab in the testcases.
}
};
if (!useFixedPorts) {
if (HAUtil.isHAEnabled(conf)) {
setHARMConfiguration(i, conf);
} else {
setNonHARMConfiguration(conf);
}
}
addService(new ResourceManagerWrapper(i));
}
for(int index = 0; index < nodeManagers.length; index++) {
@ -230,18 +242,103 @@ protected void doSecureLogin() throws IOException {
conf instanceof YarnConfiguration ? conf : new YarnConfiguration(conf));
}
private void setNonHARMConfiguration(Configuration conf) {
String hostname = MiniYARNCluster.getHostname();
conf.set(YarnConfiguration.RM_ADDRESS, hostname + ":0");
conf.set(YarnConfiguration.RM_ADMIN_ADDRESS, hostname + ":0");
conf.set(YarnConfiguration.RM_SCHEDULER_ADDRESS, hostname + ":0");
conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, hostname + ":0");
WebAppUtils.setRMWebAppHostnameAndPort(conf, hostname, 0);
}
private void setHARMConfiguration(final int index, Configuration conf) {
String hostname = MiniYARNCluster.getHostname();
for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
conf.set(HAUtil.addSuffix(confKey, rmIds[index]), hostname + ":0");
}
}
private synchronized void initResourceManager(int index, Configuration conf) {
if (HAUtil.isHAEnabled(conf)) {
conf.set(YarnConfiguration.RM_HA_ID, rmIds[index]);
}
resourceManagers[index].init(conf);
resourceManagers[index].getRMContext().getDispatcher().register(
RMAppAttemptEventType.class,
new EventHandler<RMAppAttemptEvent>() {
public void handle(RMAppAttemptEvent event) {
if (event instanceof RMAppAttemptRegistrationEvent) {
appMasters.put(event.getApplicationAttemptId(),
event.getTimestamp());
} else if (event instanceof RMAppAttemptUnregistrationEvent) {
appMasters.remove(event.getApplicationAttemptId());
}
}
});
}
private synchronized void startResourceManager(final int index) {
try {
Thread rmThread = new Thread() {
public void run() {
resourceManagers[index].start();
}
};
rmThread.setName("RM-" + index);
rmThread.start();
int waitCount = 0;
while (resourceManagers[index].getServiceState() == STATE.INITED
&& waitCount++ < 60) {
LOG.info("Waiting for RM to start...");
Thread.sleep(1500);
}
if (resourceManagers[index].getServiceState() != STATE.STARTED) {
// RM could have failed.
throw new IOException(
"ResourceManager failed to start. Final state is "
+ resourceManagers[index].getServiceState());
}
} catch (Throwable t) {
throw new YarnRuntimeException(t);
}
LOG.info("MiniYARN ResourceManager address: " +
getConfig().get(YarnConfiguration.RM_ADDRESS));
LOG.info("MiniYARN ResourceManager web address: " +
WebAppUtils.getRMWebAppURLWithoutScheme(getConfig()));
}
@InterfaceAudience.Private
@VisibleForTesting
public synchronized void stopResourceManager(int index) {
if (resourceManagers[index] != null) {
resourceManagers[index].stop();
resourceManagers[index] = null;
}
}
@InterfaceAudience.Private
@VisibleForTesting
public synchronized void restartResourceManager(int index)
throws InterruptedException {
if (resourceManagers[index] != null) {
resourceManagers[index].stop();
resourceManagers[index] = null;
}
Configuration conf = getConfig();
resourceManagers[index] = new ResourceManager();
initResourceManager(index, conf);
startResourceManager(index);
}
public File getTestWorkDir() {
return testWorkDir;
}
/**
* In an HA cluster, go through all the RMs and find the Active RM. If none
* of them are active, wait up to 5 seconds for them to transition to Active.
* In an HA cluster, go through all the RMs and find the Active RM. In a
* non-HA cluster, return the index of the only RM.
*
* In a non-HA cluster, return the index of the only RM.
*
* @return index of the active RM or -1 if none of them transition to
* active even after 5 seconds of waiting
* @return index of the active RM or -1 if none of them turn active
*/
@InterfaceAudience.Private
@VisibleForTesting
@ -250,9 +347,12 @@ public int getActiveRMIndex() {
return 0;
}
int numRetriesForRMBecomingActive = 5;
int numRetriesForRMBecomingActive = failoverTimeout / 100;
while (numRetriesForRMBecomingActive-- > 0) {
for (int i = 0; i < resourceManagers.length; i++) {
if (resourceManagers[i] == null) {
continue;
}
try {
if (HAServiceProtocol.HAServiceState.ACTIVE ==
resourceManagers[i].getRMContext().getRMAdminService()
@ -265,7 +365,7 @@ public int getActiveRMIndex() {
}
}
try {
Thread.sleep(1000);
Thread.sleep(100);
} catch (InterruptedException e) {
throw new YarnRuntimeException("Interrupted while waiting for one " +
"of the ResourceManagers to become active");
@ -282,7 +382,7 @@ public ResourceManager getResourceManager() {
int activeRMIndex = getActiveRMIndex();
return activeRMIndex == -1
? null
: this.resourceManagers[getActiveRMIndex()];
: this.resourceManagers[activeRMIndex];
}
public ResourceManager getResourceManager(int i) {
@ -310,82 +410,21 @@ public ResourceManagerWrapper(int i) {
index = i;
}
private void setNonHARMConfiguration(Configuration conf) {
String hostname = MiniYARNCluster.getHostname();
conf.set(YarnConfiguration.RM_ADDRESS, hostname + ":0");
conf.set(YarnConfiguration.RM_ADMIN_ADDRESS, hostname + ":0");
conf.set(YarnConfiguration.RM_SCHEDULER_ADDRESS, hostname + ":0");
conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, hostname + ":0");
WebAppUtils.setRMWebAppHostnameAndPort(conf, hostname, 0);
}
private void setHARMConfiguration(Configuration conf) {
String hostname = MiniYARNCluster.getHostname();
for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
for (String id : HAUtil.getRMHAIds(conf)) {
conf.set(HAUtil.addSuffix(confKey, id), hostname + ":0");
}
}
}
@Override
protected synchronized void serviceInit(Configuration conf)
throws Exception {
conf.setBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, true);
if (!useFixedPorts) {
if (HAUtil.isHAEnabled(conf)) {
setHARMConfiguration(conf);
} else {
setNonHARMConfiguration(conf);
}
}
if (HAUtil.isHAEnabled(conf)) {
conf.set(YarnConfiguration.RM_HA_ID, rmIds[index]);
}
resourceManagers[index].init(conf);
resourceManagers[index].getRMContext().getDispatcher().register
(RMAppAttemptEventType.class,
new EventHandler<RMAppAttemptEvent>() {
public void handle(RMAppAttemptEvent event) {
if (event instanceof RMAppAttemptRegistrationEvent) {
appMasters.put(event.getApplicationAttemptId(), event.getTimestamp());
} else if (event instanceof RMAppAttemptUnregistrationEvent) {
appMasters.remove(event.getApplicationAttemptId());
}
}
});
initResourceManager(index, conf);
super.serviceInit(conf);
}
@Override
protected synchronized void serviceStart() throws Exception {
try {
new Thread() {
public void run() {
resourceManagers[index].start();
}
}.start();
int waitCount = 0;
while (resourceManagers[index].getServiceState() == STATE.INITED
&& waitCount++ < 60) {
LOG.info("Waiting for RM to start...");
Thread.sleep(1500);
}
if (resourceManagers[index].getServiceState() != STATE.STARTED) {
// RM could have failed.
throw new IOException(
"ResourceManager failed to start. Final state is "
+ resourceManagers[index].getServiceState());
}
super.serviceStart();
} catch (Throwable t) {
throw new YarnRuntimeException(t);
}
startResourceManager(index);
LOG.info("MiniYARN ResourceManager address: " +
getConfig().get(YarnConfiguration.RM_ADDRESS));
LOG.info("MiniYARN ResourceManager web address: " +
WebAppUtils.getRMWebAppURLWithoutScheme(getConfig()));
super.serviceStart();
}
private void waitForAppMastersToFinish(long timeoutMillis) throws InterruptedException {
@ -406,7 +445,6 @@ protected synchronized void serviceStop() throws Exception {
waitForAppMastersToFinish(5000);
resourceManagers[index].stop();
}
super.serviceStop();
if (Shell.WINDOWS) {
// On Windows, clean up the short temporary symlink that was created to
@ -420,6 +458,7 @@ protected synchronized void serviceStop() throws Exception {
testWorkDir.getAbsolutePath());
}
}
super.serviceStop();
}
}
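With start/stop/restart factored out, failover can be driven directly from a test. A minimal sketch, assuming the multi-RM MiniYARNCluster constructor (counts are placeholders):
MiniYARNCluster cluster =
    new MiniYARNCluster("failover-test", 2 /* RMs */, 1 /* NMs */, 1, 1);
cluster.init(new YarnConfiguration());
cluster.start();
int active = cluster.getActiveRMIndex();   // polls in 100 ms steps for up to RM_ZK_TIMEOUT_MS
cluster.stopResourceManager(active);       // take down the active RM
int next = cluster.getActiveRMIndex();     // -1 if no standby turned active in time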

View File

@ -19,21 +19,20 @@
package org.apache.hadoop.yarn.server.webproxy;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.client.ClientRMProxy;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.YarnRPC;
/**
* This class abstracts away how ApplicationReports are fetched.
@ -50,16 +49,12 @@ public class AppReportFetcher {
*/
public AppReportFetcher(Configuration conf) {
this.conf = conf;
YarnRPC rpc = YarnRPC.create(this.conf);
InetSocketAddress rmAddress = conf.getSocketAddr(
YarnConfiguration.RM_ADDRESS,
YarnConfiguration.DEFAULT_RM_ADDRESS,
YarnConfiguration.DEFAULT_RM_PORT);
LOG.info("Connecting to ResourceManager at " + rmAddress);
applicationsManager =
(ApplicationClientProtocol) rpc.getProxy(ApplicationClientProtocol.class,
rmAddress, this.conf);
LOG.info("Connected to ResourceManager at " + rmAddress);
try {
applicationsManager = ClientRMProxy.createRMProxy(conf,
ApplicationClientProtocol.class);
} catch (IOException e) {
throw new YarnRuntimeException(e);
}
}
/**
@ -91,4 +86,10 @@ public ApplicationReport getApplicationReport(ApplicationId appId)
.getApplicationReport(request);
return response.getApplicationReport();
}
public void stop() {
if (this.applicationsManager != null) {
RPC.stopProxy(this.applicationsManager);
}
}
}
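Callers now pair construction with the new stop() hook. A minimal sketch (the application id is fabricated for illustration):
AppReportFetcher fetcher = new AppReportFetcher(new YarnConfiguration());
ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);  // fabricated id
ApplicationReport report = fetcher.getApplicationReport(appId);  // throws YarnException, IOException
fetcher.stop();  // releases the proxy obtained via ClientRMProxy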

View File

@ -117,6 +117,9 @@ protected void serviceStop() throws Exception {
throw new YarnRuntimeException("Error stopping proxy web server",e);
}
}
if (this.fetcher != null) {
this.fetcher.stop();
}
super.serviceStop();
}