HDFS-4692. Use timestamp as default snapshot names.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1467706 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2013-04-13 21:41:33 +00:00
parent 9c6a7bebe2
commit 0f78c50ea7
15 changed files with 119 additions and 66 deletions

View File

@ -2265,12 +2265,22 @@ public abstract class FileSystem extends Configured implements Closeable {
) throws IOException {
}
/**
* Create a snapshot with a default name.
* @param path The directory where snapshots will be taken.
* @return the snapshot path.
*/
public final Path createSnapshot(Path path) throws IOException {
return createSnapshot(path, null);
}
/**
* Create a snapshot
* @param path The directory where snapshots will be taken.
* @param snapshotName The name of the snapshot
* @return the snapshot path.
*/
public void createSnapshot(Path path, String snapshotName)
public Path createSnapshot(Path path, String snapshotName)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support createSnapshot");

View File

@ -49,10 +49,10 @@ class SnapshotCommands extends FsCommand {
*/
public static class CreateSnapshot extends FsCommand {
public static final String NAME = CREATE_SNAPSHOT;
public static final String USAGE = "<snapshotDir> <snapshotName>";
public static final String USAGE = "<snapshotDir> [<snapshotName>]";
public static final String DESCRIPTION = "Create a snapshot on a directory";
private String snapshotName;
private String snapshotName = null;
@Override
protected void processPath(PathData item) throws IOException {
@ -63,12 +63,15 @@ class SnapshotCommands extends FsCommand {
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
if (args.size() != 2) {
throw new IOException("args number not 2:" + args.size());
if (args.size() == 0) {
throw new IllegalArgumentException("<snapshotDir> is missing.");
}
if (args.size() > 2) {
throw new IllegalArgumentException("Too many arguements.");
}
if (args.size() == 2) {
snapshotName = args.removeLast();
}
snapshotName = args.removeLast();
// TODO: name length check
}
@Override
@ -108,8 +111,6 @@ class SnapshotCommands extends FsCommand {
throw new IOException("args number not 2: " + args.size());
}
snapshotName = args.removeLast();
// TODO: name length check
}
@Override
@ -151,8 +152,6 @@ class SnapshotCommands extends FsCommand {
}
newName = args.removeLast();
oldName = args.removeLast();
// TODO: new name length check
}
@Override
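In shell terms the name argument is now optional: hdfs dfs -createSnapshot /user/alice/data (path hypothetical) creates a snapshot named from the current timestamp, while supplying a second argument keeps the old explicit-name behavior.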

View File

@ -234,3 +234,5 @@ Branch-2802 Snapshot (Unreleased)
HDFS-4675. Fix rename across snapshottable directories. (Jing Zhao via
szetszwo)
HDFS-4692. Use timestamp as default snapshot names. (szetszwo)

View File

@ -2021,12 +2021,13 @@ public class DFSClient implements java.io.Closeable {
*
* @param snapshotRoot The directory where the snapshot is to be taken
* @param snapshotName Name of the snapshot
* @return the snapshot path.
* @see ClientProtocol#createSnapshot(String, String)
*/
public void createSnapshot(String snapshotRoot, String snapshotName)
public String createSnapshot(String snapshotRoot, String snapshotName)
throws IOException {
checkOpen();
namenode.createSnapshot(snapshotRoot, snapshotName);
return namenode.createSnapshot(snapshotRoot, snapshotName);
}
/**

View File

@ -932,9 +932,9 @@ public class DistributedFileSystem extends FileSystem {
}
@Override
public void createSnapshot(Path path, String snapshotName)
public Path createSnapshot(Path path, String snapshotName)
throws IOException {
dfs.createSnapshot(getPathName(path), snapshotName);
return new Path(dfs.createSnapshot(getPathName(path), snapshotName));
}
@Override

View File

@ -1000,9 +1000,10 @@ public interface ClientProtocol {
* Create a snapshot
* @param snapshotRoot the path that is being snapshotted
* @param snapshotName name of the snapshot created
* @return the snapshot path.
* @throws IOException
*/
public void createSnapshot(String snapshotRoot, String snapshotName)
public String createSnapshot(String snapshotRoot, String snapshotName)
throws IOException;
/**

View File

@ -165,8 +165,6 @@ import com.google.protobuf.ServiceException;
public class ClientNamenodeProtocolServerSideTranslatorPB implements
ClientNamenodeProtocolPB {
final private ClientProtocol server;
static final CreateSnapshotResponseProto VOID_CREATE_SNAPSHOT_RESPONSE =
CreateSnapshotResponseProto.newBuilder().build();
static final DeleteSnapshotResponseProto VOID_DELETE_SNAPSHOT_RESPONSE =
DeleteSnapshotResponseProto.newBuilder().build();
static final RenameSnapshotResponseProto VOID_RENAME_SNAPSHOT_RESPONSE =
@ -898,22 +896,26 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
@Override
public CreateSnapshotResponseProto createSnapshot(RpcController controller,
CreateSnapshotRequestProto request) throws ServiceException {
CreateSnapshotRequestProto req) throws ServiceException {
try {
server.createSnapshot(request.getSnapshotRoot(),
request.getSnapshotName());
final CreateSnapshotResponseProto.Builder builder
= CreateSnapshotResponseProto.newBuilder();
final String snapshotPath = server.createSnapshot(req.getSnapshotRoot(),
req.hasSnapshotName()? req.getSnapshotName(): null);
if (snapshotPath != null) {
builder.setSnapshotPath(snapshotPath);
}
return builder.build();
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_CREATE_SNAPSHOT_RESPONSE;
}
@Override
public DeleteSnapshotResponseProto deleteSnapshot(RpcController controller,
DeleteSnapshotRequestProto request) throws ServiceException {
DeleteSnapshotRequestProto req) throws ServiceException {
try {
server
.deleteSnapshot(request.getSnapshotRoot(), request.getSnapshotName());
server.deleteSnapshot(req.getSnapshotRoot(), req.getSnapshotName());
return VOID_DELETE_SNAPSHOT_RESPONSE;
} catch (IOException e) {
throw new ServiceException(e);

View File

@ -40,13 +40,13 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
@ -56,8 +56,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Append
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto;
@ -112,7 +112,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.INodeId;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.io.EnumSetWritable;
@ -882,12 +881,16 @@ public class ClientNamenodeProtocolTranslatorPB implements
}
@Override
public void createSnapshot(String snapshotRoot, String snapshotName)
public String createSnapshot(String snapshotRoot, String snapshotName)
throws IOException {
CreateSnapshotRequestProto req = CreateSnapshotRequestProto.newBuilder()
.setSnapshotRoot(snapshotRoot).setSnapshotName(snapshotName).build();
final CreateSnapshotRequestProto.Builder builder
= CreateSnapshotRequestProto.newBuilder().setSnapshotRoot(snapshotRoot);
if (snapshotName != null) {
builder.setSnapshotName(snapshotName);
}
final CreateSnapshotRequestProto req = builder.build();
try {
rpcProxy.createSnapshot(null, req);
return rpcProxy.createSnapshot(null, req).getSnapshotPath();
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}

View File

@ -5731,12 +5731,12 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
.shouldAvoidStaleDataNodesForWrite();
}
public SnapshotManager getSnapshotManager() {
SnapshotManager getSnapshotManager() {
return snapshotManager;
}
/** Allow snapshot on a directory. */
public void allowSnapshot(String path) throws SafeModeException, IOException {
void allowSnapshot(String path) throws SafeModeException, IOException {
final FSPermissionChecker pc = getPermissionChecker();
writeLock();
try {
@ -5762,8 +5762,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
/** Disallow snapshot on a directory. */
public void disallowSnapshot(String path)
throws SafeModeException, IOException {
void disallowSnapshot(String path) throws SafeModeException, IOException {
final FSPermissionChecker pc = getPermissionChecker();
writeLock();
try {
@ -5793,10 +5792,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* @param snapshotRoot The directory path where the snapshot is taken
* @param snapshotName The name of the snapshot
*/
public void createSnapshot(String snapshotRoot, String snapshotName)
String createSnapshot(String snapshotRoot, String snapshotName)
throws SafeModeException, IOException {
final FSPermissionChecker pc = getPermissionChecker();
writeLock();
final String snapshotPath;
try {
checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) {
@ -5805,9 +5805,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
checkOwner(pc, snapshotRoot);
if (snapshotName == null || snapshotName.isEmpty()) {
snapshotName = Snapshot.generateDefaultSnapshotName();
}
dir.verifyMaxComponentLength(snapshotName, snapshotRoot, 0);
dir.writeLock();
try {
snapshotManager.createSnapshot(snapshotRoot, snapshotName);
snapshotPath = snapshotManager.createSnapshot(snapshotRoot, snapshotName);
} finally {
dir.writeUnlock();
}
@ -5818,11 +5822,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
getEditLog().logSync();
if (auditLog.isInfoEnabled() && isExternalInvocation()) {
Path rootPath = new Path(snapshotRoot, HdfsConstants.DOT_SNAPSHOT_DIR
+ Path.SEPARATOR + snapshotName);
logAuditEvent(true, "createSnapshot", snapshotRoot, rootPath.toString(),
null);
logAuditEvent(true, "createSnapshot", snapshotRoot, snapshotPath, null);
}
return snapshotPath;
}
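Note that the default name is substituted on the server side as well: a null or empty snapshotName is replaced by Snapshot.generateDefaultSnapshotName() before the max-component-length check, so RPC callers and the shell see the same behavior.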
/**
@ -5833,7 +5835,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* @throws SafeModeException
* @throws IOException
*/
public void renameSnapshot(String path, String snapshotOldName,
void renameSnapshot(String path, String snapshotOldName,
String snapshotNewName) throws SafeModeException, IOException {
final FSPermissionChecker pc = getPermissionChecker();
writeLock();
@ -5844,8 +5846,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
safeMode);
}
checkOwner(pc, path);
// TODO: check if the new name is valid. May also need this for
// creationSnapshot
dir.verifyMaxComponentLength(snapshotNewName, path, 0);
snapshotManager.renameSnapshot(path, snapshotOldName, snapshotNewName);
getEditLog().logRenameSnapshot(path, snapshotOldName, snapshotNewName);
@ -5905,7 +5906,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* and labeled as M/-/+/R respectively.
* @throws IOException
*/
public SnapshotDiffReport getSnapshotDiffReport(String path,
SnapshotDiffReport getSnapshotDiffReport(String path,
String fromSnapshot, String toSnapshot) throws IOException {
SnapshotDiffInfo diffs = null;
readLock();
@ -5931,7 +5932,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* @throws SafeModeException
* @throws IOException
*/
public void deleteSnapshot(String snapshotRoot, String snapshotName)
void deleteSnapshot(String snapshotRoot, String snapshotName)
throws SafeModeException, IOException {
final FSPermissionChecker pc = getPermissionChecker();
writeLock();

View File

@ -1093,14 +1093,14 @@ class NameNodeRpcServer implements NamenodeProtocols {
}
@Override
public void createSnapshot(String snapshotRoot, String snapshotName)
public String createSnapshot(String snapshotRoot, String snapshotName)
throws IOException {
if (!checkPathLength(snapshotRoot)) {
throw new IOException("createSnapshot: Pathname too long. Limit "
+ MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
}
metrics.incrCreateSnapshotOps();
namesystem.createSnapshot(snapshotRoot, snapshotName);
return namesystem.createSnapshot(snapshotRoot, snapshotName);
}
@Override
@ -1127,6 +1127,9 @@ class NameNodeRpcServer implements NamenodeProtocols {
@Override
public void renameSnapshot(String snapshotRoot, String snapshotOldName,
String snapshotNewName) throws IOException {
if (snapshotNewName == null || snapshotNewName.isEmpty()) {
throw new IOException("The new snapshot name is null or empty.");
}
metrics.incrRenameSnapshotOps();
namesystem.renameSnapshot(snapshotRoot, snapshotOldName, snapshotNewName);
}

View File

@ -288,7 +288,7 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
final int i = searchSnapshot(nameBytes);
if (i >= 0) {
throw new SnapshotException("Failed to add snapshot: there is already a "
+ "snapshot with the same name \"" + name + "\".");
+ "snapshot with the same name \"" + Snapshot.getSnapshotName(s) + "\".");
}
final DirectoryDiff d = getDiffs().addDiff(s, this);

View File

@ -20,7 +20,9 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Comparator;
import java.util.Date;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.Path;
@ -37,6 +39,30 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
public class Snapshot implements Comparable<byte[]> {
public static final int INVALID_ID = -1;
/**
* The pattern for generating the default snapshot name.
* E.g. s20130412-151029.033
*/
private static final String DEFAULT_SNAPSHOT_NAME_PATTERN = "'s'yyyyMMdd-HHmmss.SSS";
public static String generateDefaultSnapshotName() {
return new SimpleDateFormat(DEFAULT_SNAPSHOT_NAME_PATTERN).format(new Date());
}
static String getSnapshotPath(String snapshottableDir, String snapshotName) {
return new Path(snapshottableDir, HdfsConstants.DOT_SNAPSHOT_DIR
+ Path.SEPARATOR + snapshotName).toString();
}
/**
* Get the name of the given snapshot.
* @param s The given snapshot.
* @return The name of the snapshot, or an empty string if {@code s} is null
*/
static String getSnapshotName(Snapshot s) {
return s != null ? s.getRoot().getLocalName() : "";
}
/**
* Compare snapshot IDs. Null indicates the current status thus is greater
* than non-null snapshots.
@ -78,15 +104,6 @@ public class Snapshot implements Comparable<byte[]> {
}
return latest;
}
/**
* Get the name of the given snapshot.
* @param s The given snapshot.
* @return The name of the snapshot, or an empty string if {@code s} is null
*/
public static String getSnapshotName(Snapshot s) {
return s != null ? s.getRoot().getLocalName() : "";
}
/** The root directory of the snapshot. */
public class Root extends INodeDirectory {
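A standalone sketch of the naming scheme, using the same SimpleDateFormat pattern as above (the printed value is only an example):

import java.text.SimpleDateFormat;
import java.util.Date;

public class DefaultSnapshotNameDemo {
  public static void main(String[] args) {
    // literal 's', then date, '-', time, '.', milliseconds
    String name = new SimpleDateFormat("'s'yyyyMMdd-HHmmss.SSS").format(new Date());
    System.out.println(name); // e.g. s20130412-151029.033
  }
}

Creating a new SimpleDateFormat per call, as the patch does, also sidesteps the class's lack of thread safety.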

View File

@ -127,19 +127,19 @@ public class SnapshotManager implements SnapshotStats {
* snapshot with the given name for the directory, and/or 3)
* snapshot number exceeds quota
*/
public void createSnapshot(final String path, final String snapshotName
public String createSnapshot(final String path, String snapshotName
) throws IOException {
// Find the source root directory path where the snapshot is taken.
final INodesInPath i = fsdir.getINodesInPath4Write(path);
final INodeDirectorySnapshottable srcRoot
= INodeDirectorySnapshottable.valueOf(i.getLastINode(), path);
fsdir.verifyMaxComponentLength(snapshotName, path, 0);
srcRoot.addSnapshot(snapshotCounter, snapshotName);
// creation succeeded, update the snapshot id
snapshotCounter++;
numSnapshots.getAndIncrement();
return Snapshot.getSnapshotPath(path, snapshotName);
}
/**

View File

@ -450,10 +450,11 @@ message GetDataEncryptionKeyResponseProto {
message CreateSnapshotRequestProto {
required string snapshotRoot = 1;
required string snapshotName = 2;
optional string snapshotName = 2;
}
message CreateSnapshotResponseProto { // void response
message CreateSnapshotResponseProto {
required string snapshotPath = 1;
}
message RenameSnapshotRequestProto {
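Relaxing snapshotName from required to optional means a request built without a name is still a valid message, which is what lets the translator above skip setSnapshotName(). An illustrative sketch against the generated class (the root path is hypothetical):

import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto;

public class OptionalNameDemo {
  public static void main(String[] args) {
    // builds fine now; with a required snapshotName this would throw
    CreateSnapshotRequestProto req = CreateSnapshotRequestProto.newBuilder()
        .setSnapshotRoot("/data")
        .build();
    System.out.println(req.hasSnapshotName()); // false: server generates a default
  }
}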

View File

@ -21,6 +21,7 @@ import static org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnap
import java.io.IOException;
import java.util.Random;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
@ -178,7 +179,19 @@ public class TestNestedSnapshots {
final Path foo = new Path(dir, "foo");
final Path f1 = new Path(foo, "f1");
DFSTestUtil.createFile(hdfs, f1, BLOCKSIZE, REPLICATION, SEED);
hdfs.createSnapshot(dir, "s0");
{
// create a snapshot with the default snapshot name
final Path snapshotPath = hdfs.createSnapshot(dir);
// check the snapshot path and the default snapshot name
final String snapshotName = snapshotPath.getName();
Assert.assertTrue("snapshotName=" + snapshotName, Pattern.matches(
"s\\d\\d\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d\\d\\d\\.\\d\\d\\d",
snapshotName));
final Path parent = snapshotPath.getParent();
Assert.assertEquals(HdfsConstants.DOT_SNAPSHOT_DIR, parent.getName());
Assert.assertEquals(dir, parent.getParent());
}
final Path f2 = new Path(foo, "f2");
DFSTestUtil.createFile(hdfs, f2, BLOCKSIZE, REPLICATION, SEED);
@ -193,7 +206,7 @@ public class TestNestedSnapshots {
try {
// createSnapshot should fail with quota
hdfs.createSnapshot(dir, "s1");
hdfs.createSnapshot(dir);
Assert.fail();
} catch(RemoteException re) {
final IOException ioe = re.unwrapRemoteException();