HDFS-14922. Prevent the snapshot modification time from changing on startup. Contributed by hemanthboyina.

Inigo Goiri 2019-11-12 10:53:54 -08:00
parent 990063d2af
commit 40150da1e1
9 changed files with 80 additions and 19 deletions
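
In short: the snapshot creation time used to be computed with Time.now() inside
DirectorySnapshottableFeature.addSnapshot(), so replaying a CreateSnapshotOp from the edit
log at NameNode startup re-stamped the snapshot root with the restart time instead of the
original creation time. This change captures the time once at the RPC layer, threads it
through SnapshotManager and INodeDirectory, and records it in the CreateSnapshotOp so that
replay reuses it. In outline (simplified, not the literal diff):

// Create path: capture the time once and persist it with the edit-log op.
final long now = Time.now();
snapshotManager.createSnapshot(leaseManager, iip, snapshotRoot, snapshotName, now);
fsd.getEditLog().logCreateSnapshot(snapshotRoot, snapshotName, logRetryCache, now);

// Replay path (FSEditLogLoader): reuse the recorded time instead of the clock,
// so the snapshot's modification time no longer changes across restarts.
snapshotManager.createSnapshot(leaseManager, iip, snapshotRoot,
    createSnapshotOp.snapshotName, createSnapshotOp.mtime);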

FSDirSnapshotOp.java

@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import org.apache.hadoop.util.ChunkedArrayList;
import org.apache.hadoop.util.Time;
import java.io.IOException;
import java.util.ArrayList;
@@ -104,16 +105,18 @@ class FSDirSnapshotOp {
String snapshotPath;
verifySnapshotName(fsd, snapshotName, snapshotRoot);
// time of snapshot creation
final long now = Time.now();
fsd.writeLock();
try {
snapshotPath = snapshotManager.createSnapshot(
fsd.getFSNamesystem().getLeaseManager(),
iip, snapshotRoot, snapshotName);
iip, snapshotRoot, snapshotName, now);
} finally {
fsd.writeUnlock();
}
fsd.getEditLog().logCreateSnapshot(snapshotRoot, snapshotName,
logRetryCache);
logRetryCache, now);
return snapshotPath;
}

FSEditLog.java

@@ -1116,10 +1116,19 @@ public class FSEditLog implements LogsPurgeable {
.setNewHolder(newHolder);
logEdit(op);
}
void logCreateSnapshot(String snapRoot, String snapName, boolean toLogRpcIds) {
/**
* Log that a snapshot is created.
* @param snapRoot Root of the snapshot.
* @param snapName Name of the snapshot.
* @param toLogRpcIds If it is logging RPC ids.
* @param mtime The snapshot creation time set by Time.now().
*/
void logCreateSnapshot(String snapRoot, String snapName, boolean toLogRpcIds,
long mtime) {
CreateSnapshotOp op = CreateSnapshotOp.getInstance(cache.get())
.setSnapshotRoot(snapRoot).setSnapshotName(snapName);
.setSnapshotRoot(snapRoot).setSnapshotName(snapName)
.setSnapshotMTime(mtime);
logRpcIds(op, toLogRpcIds);
logEdit(op);
}

FSEditLogLoader.java

@@ -801,7 +801,8 @@ public class FSEditLogLoader {
INodesInPath iip = fsDir.getINodesInPath(snapshotRoot, DirOp.WRITE);
String path = fsNamesys.getSnapshotManager().createSnapshot(
fsDir.getFSNamesystem().getLeaseManager(),
iip, snapshotRoot, createSnapshotOp.snapshotName);
iip, snapshotRoot, createSnapshotOp.snapshotName,
createSnapshotOp.mtime);
if (toAddRetryCache) {
fsNamesys.addCacheEntryWithPayload(createSnapshotOp.rpcClientId,
createSnapshotOp.rpcCallId, path);

FSEditLogOp.java

@@ -3437,6 +3437,8 @@ public abstract class FSEditLogOp {
static class CreateSnapshotOp extends FSEditLogOp {
String snapshotRoot;
String snapshotName;
/** Modification time of the edit set by Time.now(). */
long mtime;
public CreateSnapshotOp() {
super(OP_CREATE_SNAPSHOT);
@@ -3450,22 +3452,32 @@
void resetSubFields() {
snapshotRoot = null;
snapshotName = null;
mtime = 0L;
}
/* set the name of the snapshot. */
CreateSnapshotOp setSnapshotName(String snapName) {
this.snapshotName = snapName;
return this;
}
/* set the directory path where the snapshot is taken. */
public CreateSnapshotOp setSnapshotRoot(String snapRoot) {
snapshotRoot = snapRoot;
return this;
}
/* The snapshot creation time set by Time.now(). */
CreateSnapshotOp setSnapshotMTime(long mTime) {
this.mtime = mTime;
return this;
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
snapshotRoot = FSImageSerialization.readString(in);
snapshotName = FSImageSerialization.readString(in);
mtime = FSImageSerialization.readLong(in);
// read RPC ids if necessary
readRpcIds(in, logVersion);
@@ -3475,6 +3487,7 @@
public void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(snapshotRoot, out);
FSImageSerialization.writeString(snapshotName, out);
FSImageSerialization.writeLong(mtime, out);
writeRpcIds(rpcClientId, rpcCallId, out);
}
@@ -3482,6 +3495,7 @@
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "SNAPSHOTROOT", snapshotRoot);
XMLUtils.addSaxString(contentHandler, "SNAPSHOTNAME", snapshotName);
XMLUtils.addSaxString(contentHandler, "MTIME", Long.toString(mtime));
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
@@ -3489,6 +3503,7 @@
void fromXml(Stanza st) throws InvalidXmlException {
snapshotRoot = st.getValue("SNAPSHOTROOT");
snapshotName = st.getValue("SNAPSHOTNAME");
this.mtime = Long.parseLong(st.getValue("MTIME"));
readRpcIdsFromXml(st);
}
@@ -3499,7 +3514,8 @@
builder.append("CreateSnapshotOp [snapshotRoot=")
.append(snapshotRoot)
.append(", snapshotName=")
.append(snapshotName);
.append(snapshotName)
.append(", mtime=").append(mtime);
appendRpcIdsToString(builder, rpcClientId, rpcCallId);
builder.append("]");
return builder.toString();
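
With the extra long in the record, CreateSnapshotOp now carries the creation time through
both the binary edit log (writeFields/readFields) and the XML form used by the offline
edits viewer (the new MTIME element). A rough, untested sketch of how the round trip could
be checked: the test class is hypothetical, would have to live in
org.apache.hadoop.hdfs.server.namenode because CreateSnapshotOp and several of its setters
are package-private, and assumes FSEditLogOp's setRpcClientId/setRpcCallId setters:

package org.apache.hadoop.hdfs.server.namenode;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

import org.apache.hadoop.ipc.ClientId;
import org.apache.hadoop.util.Time;
import org.junit.Assert;
import org.junit.Test;

public class TestCreateSnapshotOpMtime {  // hypothetical test class
  @Test
  public void testMtimeSurvivesSerialization() throws Exception {
    final long mtime = Time.now();
    FSEditLogOp.CreateSnapshotOp written = new FSEditLogOp.CreateSnapshotOp()
        .setSnapshotRoot("/dir")
        .setSnapshotName("s1")
        .setSnapshotMTime(mtime);
    // Give the op concrete RPC ids so that the RPC-id fields round-trip as well.
    written.setRpcClientId(ClientId.getClientId());
    written.setRpcCallId(1);

    // Serialize the op the same way the edit log writer does.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    written.writeFields(new DataOutputStream(bytes));

    // Read it back under the current layout version and compare the creation time.
    FSEditLogOp.CreateSnapshotOp read = new FSEditLogOp.CreateSnapshotOp();
    read.readFields(
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())),
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    Assert.assertEquals(mtime, read.mtime);
  }
}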

INodeDirectory.java

@@ -276,12 +276,17 @@ public class INodeDirectory extends INodeWithAdditionalFields
getDirectorySnapshottableFeature().setSnapshotQuota(snapshotQuota);
}
/**
* Add a snapshot.
* @param name Name of the snapshot.
* @param mtime The snapshot creation time set by Time.now().
*/
public Snapshot addSnapshot(int id, String name,
final LeaseManager leaseManager, final boolean captureOpenFiles,
int maxSnapshotLimit)
int maxSnapshotLimit, long mtime)
throws SnapshotException {
return getDirectorySnapshottableFeature().addSnapshot(this, id, name,
leaseManager, captureOpenFiles, maxSnapshotLimit);
leaseManager, captureOpenFiles, maxSnapshotLimit, mtime);
}
public Snapshot removeSnapshot(

DirectorySnapshottableFeature.java

@@ -166,10 +166,17 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature
this.snapshotsByNames.add(snapshot);
}
/** Add a snapshot. */
/**
* Add a snapshot.
* @param snapshotRoot Root of the snapshot.
* @param name Name of the snapshot.
* @param mtime The snapshot creation time set by Time.now().
* @throws SnapshotException when a snapshot with the same name already
* exists or the snapshot quota is exceeded.
*/
public Snapshot addSnapshot(INodeDirectory snapshotRoot, int id, String name,
final LeaseManager leaseManager, final boolean captureOpenFiles,
int maxSnapshotLimit)
int maxSnapshotLimit, long now)
throws SnapshotException {
//check snapshot quota
final int n = getNumSnapshots();
@@ -195,8 +202,7 @@
d.setSnapshotRoot(s.getRoot());
snapshotsByNames.add(-i - 1, s);
// set modification time
final long now = Time.now();
// modification time is the snapshot creation time
snapshotRoot.updateModificationTime(now, Snapshot.CURRENT_STATE_ID);
s.getRoot().setModificationTime(now, Snapshot.CURRENT_STATE_ID);
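
This hunk is the root cause of the startup drift: the modification time used to be computed
right here with Time.now(), so every replay of the logged operation, including the one
during NameNode startup, produced a fresh timestamp. With the caller-supplied creation time
the operation becomes repeatable; in sketch form:

// Before (removed above): the time was re-evaluated on every call, replay included.
//   final long now = Time.now();
//   snapshotRoot.updateModificationTime(now, Snapshot.CURRENT_STATE_ID);
// After: `now` is the creation time recorded in the CreateSnapshotOp, so replaying
// the same op always yields the same modification time.
//   snapshotRoot.updateModificationTime(now, Snapshot.CURRENT_STATE_ID);
//   s.getRoot().setModificationTime(now, Snapshot.CURRENT_STATE_ID);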

SnapshotManager.java

@@ -312,6 +312,7 @@ public class SnapshotManager implements SnapshotStatsMXBean {
* @param iip the INodes resolved from the snapshottable directory's path
* @param snapshotName
* The name of the snapshot.
* @param mtime is the snapshot creation time set by Time.now().
* @throws IOException
* Throw IOException when 1) the given path does not lead to an
* existing snapshottable directory, and/or 2) there exists a
@@ -319,7 +320,8 @@ public class SnapshotManager implements SnapshotStatsMXBean {
* snapshot number exceeds quota
*/
public String createSnapshot(final LeaseManager leaseManager,
final INodesInPath iip, String snapshotRoot, String snapshotName)
final INodesInPath iip, String snapshotRoot, String snapshotName,
long mtime)
throws IOException {
INodeDirectory srcRoot = getSnapshottableRoot(iip);
@@ -333,7 +335,7 @@ public class SnapshotManager implements SnapshotStatsMXBean {
}
srcRoot.addSnapshot(snapshotCounter, snapshotName, leaseManager,
this.captureOpenFiles, maxSnapshotLimit);
this.captureOpenFiles, maxSnapshotLimit, mtime);
//create success, update id
snapshotCounter++;

TestSnapshot.java

@@ -456,6 +456,22 @@ public class TestSnapshot {
assertEquals(0, rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
}
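/**
 * Restarting the NameNode below forces the CreateSnapshotOp logged for "s1" to be
 * replayed from the edit log; before HDFS-14922 that replay re-stamped the snapshot
 * root with Time.now(), so the modification time observed after the restart differed
 * from the one recorded at creation.
 */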
@Test(timeout = 60000)
public void testSnapshotMtime() throws Exception {
Path dir = new Path("/dir");
Path sub = new Path(dir, "sub");
Path subFile = new Path(sub, "file");
DFSTestUtil.createFile(hdfs, subFile, BLOCKSIZE, REPLICATION, seed);
hdfs.allowSnapshot(dir);
Path snapshotPath = hdfs.createSnapshot(dir, "s1");
FileStatus oldSnapshotStatus = hdfs.getFileStatus(snapshotPath);
cluster.restartNameNodes();
FileStatus newSnapshotStatus = hdfs.getFileStatus(snapshotPath);
assertEquals(oldSnapshotStatus.getModificationTime(),
newSnapshotStatus.getModificationTime());
}
/**
* Prepare a list of modifications. A modification may be a file creation,
* file deletion, or a modification operation such as appending to an existing

TestSnapshotManager.java

@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.junit.Assert;
import org.junit.Test;
@@ -60,14 +61,15 @@ public class TestSnapshotManager {
// Create testMaxSnapshotLimit snapshots. These should all succeed.
//
for (Integer i = 0; i < testMaxSnapshotLimit; ++i) {
sm.createSnapshot(leaseManager, iip, "dummy", i.toString());
sm.createSnapshot(leaseManager, iip, "dummy", i.toString(), Time.now());
}
// Attempt to create one more snapshot. This should fail due to snapshot
// ID rollover.
//
try {
sm.createSnapshot(leaseManager, iip, "dummy", "shouldFailSnapshot");
sm.createSnapshot(leaseManager, iip, "dummy", "shouldFailSnapshot",
Time.now());
Assert.fail("Expected SnapshotException not thrown");
} catch (SnapshotException se) {
Assert.assertTrue(
@@ -82,7 +84,8 @@ public class TestSnapshotManager {
// to snapshot ID rollover.
//
try {
sm.createSnapshot(leaseManager, iip, "dummy", "shouldFailSnapshot2");
sm.createSnapshot(leaseManager, iip, "dummy", "shouldFailSnapshot2",
Time.now());
Assert.fail("Expected SnapshotException not thrown");
} catch (SnapshotException se) {
Assert.assertTrue(