HDFS-4692. Use timestamp as default snapshot names.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1467706 13f79535-47bb-0310-9956-ffa450edef68
Author: Tsz-wo Sze
Date: 2013-04-13 21:41:33 +00:00
parent 9c6a7bebe2
commit 0f78c50ea7
15 changed files with 119 additions and 66 deletions

FileSystem.java

@@ -2265,12 +2265,22 @@ public abstract class FileSystem extends Configured implements Closeable {
       ) throws IOException {
   }
 
+  /**
+   * Create a snapshot with a default name.
+   * @param path The directory where snapshots will be taken.
+   * @return the snapshot path.
+   */
+  public final Path createSnapshot(Path path) throws IOException {
+    return createSnapshot(path, null);
+  }
+
   /**
    * Create a snapshot
    * @param path The directory where snapshots will be taken.
    * @param snapshotName The name of the snapshot
+   * @return the snapshot path.
    */
-  public void createSnapshot(Path path, String snapshotName)
+  public Path createSnapshot(Path path, String snapshotName)
       throws IOException {
     throw new UnsupportedOperationException(getClass().getSimpleName()
         + " doesn't support createSnapshot");
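
The new zero-argument overload delegates to createSnapshot(path, null), so a null name is the signal for "generate a default". A minimal client-side sketch of the resulting API (the configuration and the /user/alice path are hypothetical, and the directory must already be snapshottable):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateSnapshotExample {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at an HDFS cluster and /user/alice
    // has already been made snapshottable by an administrator.
    final FileSystem fs = FileSystem.get(new Configuration());

    // No snapshot name given: the NameNode generates one from its current
    // time, e.g. /user/alice/.snapshot/s20130412-151029.033
    final Path snapshotPath = fs.createSnapshot(new Path("/user/alice"));
    System.out.println("Created " + snapshotPath);
  }
}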

SnapshotCommands.java

@@ -49,10 +49,10 @@ class SnapshotCommands extends FsCommand {
    */
   public static class CreateSnapshot extends FsCommand {
     public static final String NAME = CREATE_SNAPSHOT;
-    public static final String USAGE = "<snapshotDir> <snapshotName>";
+    public static final String USAGE = "<snapshotDir> [<snapshotName>]";
     public static final String DESCRIPTION = "Create a snapshot on a directory";
 
-    private String snapshotName;
+    private String snapshotName = null;
 
     @Override
     protected void processPath(PathData item) throws IOException {
@@ -63,12 +63,15 @@ class SnapshotCommands extends FsCommand {
 
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
-      if (args.size() != 2) {
-        throw new IOException("args number not 2:" + args.size());
+      if (args.size() == 0) {
+        throw new IllegalArgumentException("<snapshotDir> is missing.");
       }
+      if (args.size() > 2) {
+        throw new IllegalArgumentException("Too many arguements.");
+      }
+      if (args.size() == 2) {
         snapshotName = args.removeLast();
-      // TODO: name length check
+      }
     }
 
     @Override
@@ -108,8 +111,6 @@ class SnapshotCommands extends FsCommand {
         throw new IOException("args number not 2: " + args.size());
       }
       snapshotName = args.removeLast();
-      // TODO: name length check
     }
 
     @Override
@@ -151,8 +152,6 @@ class SnapshotCommands extends FsCommand {
       }
       newName = args.removeLast();
       oldName = args.removeLast();
-      // TODO: new name length check
     }
 
     @Override
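
After this change both shell forms are accepted: hadoop fs -createSnapshot /user/alice creates a snapshot with a server-generated timestamp name, while hadoop fs -createSnapshot /user/alice s0 keeps the old explicit-name behavior; zero arguments or more than two are rejected up front. (The /user/alice path is just an example; the exact shell entry point depends on how this branch is deployed.)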

CHANGES.HDFS-2802.txt

@@ -234,3 +234,5 @@ Branch-2802 Snapshot (Unreleased)
   HDFS-4675. Fix rename across snapshottable directories. (Jing Zhao via
   szetszwo)
 
+  HDFS-4692. Use timestamp as default snapshot names. (szetszwo)

DFSClient.java

@@ -2021,12 +2021,13 @@ public class DFSClient implements java.io.Closeable {
    *
    * @param snapshotRoot The directory where the snapshot is to be taken
    * @param snapshotName Name of the snapshot
+   * @return the snapshot path.
    * @see ClientProtocol#createSnapshot(String, String)
    */
-  public void createSnapshot(String snapshotRoot, String snapshotName)
+  public String createSnapshot(String snapshotRoot, String snapshotName)
       throws IOException {
     checkOpen();
-    namenode.createSnapshot(snapshotRoot, snapshotName);
+    return namenode.createSnapshot(snapshotRoot, snapshotName);
   }
 
   /**

DistributedFileSystem.java

@@ -932,9 +932,9 @@ public class DistributedFileSystem extends FileSystem {
   }
 
   @Override
-  public void createSnapshot(Path path, String snapshotName)
+  public Path createSnapshot(Path path, String snapshotName)
       throws IOException {
-    dfs.createSnapshot(getPathName(path), snapshotName);
+    return new Path(dfs.createSnapshot(getPathName(path), snapshotName));
   }
 
   @Override

ClientProtocol.java

@@ -1000,9 +1000,10 @@ public interface ClientProtocol {
    * Create a snapshot
    * @param snapshotRoot the path that is being snapshotted
    * @param snapshotName name of the snapshot created
+   * @return the snapshot path.
    * @throws IOException
    */
-  public void createSnapshot(String snapshotRoot, String snapshotName)
+  public String createSnapshot(String snapshotRoot, String snapshotName)
       throws IOException;
 
   /**

ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -165,8 +165,6 @@ import com.google.protobuf.ServiceException;
 public class ClientNamenodeProtocolServerSideTranslatorPB implements
     ClientNamenodeProtocolPB {
   final private ClientProtocol server;
-  static final CreateSnapshotResponseProto VOID_CREATE_SNAPSHOT_RESPONSE =
-      CreateSnapshotResponseProto.newBuilder().build();
   static final DeleteSnapshotResponseProto VOID_DELETE_SNAPSHOT_RESPONSE =
       DeleteSnapshotResponseProto.newBuilder().build();
   static final RenameSnapshotResponseProto VOID_RENAME_SNAPSHOT_RESPONSE =
@@ -898,22 +896,26 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   @Override
   public CreateSnapshotResponseProto createSnapshot(RpcController controller,
-      CreateSnapshotRequestProto request) throws ServiceException {
+      CreateSnapshotRequestProto req) throws ServiceException {
     try {
-      server.createSnapshot(request.getSnapshotRoot(),
-          request.getSnapshotName());
+      final CreateSnapshotResponseProto.Builder builder
+          = CreateSnapshotResponseProto.newBuilder();
+      final String snapshotPath = server.createSnapshot(req.getSnapshotRoot(),
+          req.hasSnapshotName()? req.getSnapshotName(): null);
+      if (snapshotPath != null) {
+        builder.setSnapshotPath(snapshotPath);
+      }
+      return builder.build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
-    return VOID_CREATE_SNAPSHOT_RESPONSE;
   }
 
   @Override
   public DeleteSnapshotResponseProto deleteSnapshot(RpcController controller,
-      DeleteSnapshotRequestProto request) throws ServiceException {
+      DeleteSnapshotRequestProto req) throws ServiceException {
     try {
-      server
-          .deleteSnapshot(request.getSnapshotRoot(), request.getSnapshotName());
+      server.deleteSnapshot(req.getSnapshotRoot(), req.getSnapshotName());
       return VOID_DELETE_SNAPSHOT_RESPONSE;
     } catch (IOException e) {
       throw new ServiceException(e);
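
Design note: the optional proto field (see ClientNamenodeProtocol.proto below) is what carries the "no name supplied" case across the wire. The client-side translator leaves snapshotName unset when the caller passes null, this server-side translator maps an unset field back to null, and FSNamesystem then substitutes a generated default. The snapshotPath null check above is defensive; after this commit the NameNode always returns a concrete path.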

ClientNamenodeProtocolTranslatorPB.java

@@ -40,13 +40,13 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
@@ -56,8 +56,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Append
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto;
@@ -112,7 +112,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.EnumSetWritable;
@@ -882,12 +881,16 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
 
   @Override
-  public void createSnapshot(String snapshotRoot, String snapshotName)
+  public String createSnapshot(String snapshotRoot, String snapshotName)
       throws IOException {
-    CreateSnapshotRequestProto req = CreateSnapshotRequestProto.newBuilder()
-        .setSnapshotRoot(snapshotRoot).setSnapshotName(snapshotName).build();
+    final CreateSnapshotRequestProto.Builder builder
+        = CreateSnapshotRequestProto.newBuilder().setSnapshotRoot(snapshotRoot);
+    if (snapshotName != null) {
+      builder.setSnapshotName(snapshotName);
+    }
+    final CreateSnapshotRequestProto req = builder.build();
     try {
-      rpcProxy.createSnapshot(null, req);
+      return rpcProxy.createSnapshot(null, req).getSnapshotPath();
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }

FSNamesystem.java

@@ -5731,12 +5731,12 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         .shouldAvoidStaleDataNodesForWrite();
   }
 
-  public SnapshotManager getSnapshotManager() {
+  SnapshotManager getSnapshotManager() {
     return snapshotManager;
   }
 
   /** Allow snapshot on a directroy. */
-  public void allowSnapshot(String path) throws SafeModeException, IOException {
+  void allowSnapshot(String path) throws SafeModeException, IOException {
     final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
@@ -5762,8 +5762,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   }
 
   /** Disallow snapshot on a directory. */
-  public void disallowSnapshot(String path)
-      throws SafeModeException, IOException {
+  void disallowSnapshot(String path) throws SafeModeException, IOException {
     final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
@@ -5793,10 +5792,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    * @param snapshotRoot The directory path where the snapshot is taken
    * @param snapshotName The name of the snapshot
    */
-  public void createSnapshot(String snapshotRoot, String snapshotName)
+  String createSnapshot(String snapshotRoot, String snapshotName)
       throws SafeModeException, IOException {
     final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
+    final String snapshotPath;
     try {
       checkOperation(OperationCategory.WRITE);
       if (isInSafeMode()) {
@@ -5805,9 +5805,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       }
       checkOwner(pc, snapshotRoot);
 
+      if (snapshotName == null || snapshotName.isEmpty()) {
+        snapshotName = Snapshot.generateDefaultSnapshotName();
+      }
+      dir.verifyMaxComponentLength(snapshotName, snapshotRoot, 0);
       dir.writeLock();
       try {
-        snapshotManager.createSnapshot(snapshotRoot, snapshotName);
+        snapshotPath = snapshotManager.createSnapshot(snapshotRoot, snapshotName);
       } finally {
         dir.writeUnlock();
       }
@@ -5818,11 +5822,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     getEditLog().logSync();
 
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
-      Path rootPath = new Path(snapshotRoot, HdfsConstants.DOT_SNAPSHOT_DIR
-          + Path.SEPARATOR + snapshotName);
-      logAuditEvent(true, "createSnapshot", snapshotRoot, rootPath.toString(),
-          null);
+      logAuditEvent(true, "createSnapshot", snapshotRoot, snapshotPath, null);
     }
+    return snapshotPath;
   }
 
   /**
@@ -5833,7 +5835,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    * @throws SafeModeException
    * @throws IOException
    */
-  public void renameSnapshot(String path, String snapshotOldName,
+  void renameSnapshot(String path, String snapshotOldName,
       String snapshotNewName) throws SafeModeException, IOException {
     final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
@@ -5844,8 +5846,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
             safeMode);
       }
       checkOwner(pc, path);
-      // TODO: check if the new name is valid. May also need this for
-      // creationSnapshot
+      dir.verifyMaxComponentLength(snapshotNewName, path, 0);
 
       snapshotManager.renameSnapshot(path, snapshotOldName, snapshotNewName);
       getEditLog().logRenameSnapshot(path, snapshotOldName, snapshotNewName);
@@ -5905,7 +5906,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    *         and labeled as M/-/+/R respectively.
    * @throws IOException
    */
-  public SnapshotDiffReport getSnapshotDiffReport(String path,
+  SnapshotDiffReport getSnapshotDiffReport(String path,
       String fromSnapshot, String toSnapshot) throws IOException {
     SnapshotDiffInfo diffs = null;
     readLock();
@@ -5931,7 +5932,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    * @throws SafeModeException
    * @throws IOException
    */
-  public void deleteSnapshot(String snapshotRoot, String snapshotName)
+  void deleteSnapshot(String snapshotRoot, String snapshotName)
      throws SafeModeException, IOException {
     final FSPermissionChecker pc = getPermissionChecker();
     writeLock();

NameNodeRpcServer.java

@@ -1093,14 +1093,14 @@ class NameNodeRpcServer implements NamenodeProtocols {
   }
 
   @Override
-  public void createSnapshot(String snapshotRoot, String snapshotName)
+  public String createSnapshot(String snapshotRoot, String snapshotName)
       throws IOException {
     if (!checkPathLength(snapshotRoot)) {
       throw new IOException("createSnapshot: Pathname too long. Limit "
           + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
     }
     metrics.incrCreateSnapshotOps();
-    namesystem.createSnapshot(snapshotRoot, snapshotName);
+    return namesystem.createSnapshot(snapshotRoot, snapshotName);
   }
 
   @Override
@@ -1127,6 +1127,9 @@ class NameNodeRpcServer implements NamenodeProtocols {
   @Override
   public void renameSnapshot(String snapshotRoot, String snapshotOldName,
       String snapshotNewName) throws IOException {
+    if (snapshotNewName == null || snapshotNewName.isEmpty()) {
+      throw new IOException("The new snapshot name is null or empty.");
+    }
     metrics.incrRenameSnapshotOps();
     namesystem.renameSnapshot(snapshotRoot, snapshotOldName, snapshotNewName);
   }

INodeDirectorySnapshottable.java

@ -288,7 +288,7 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
final int i = searchSnapshot(nameBytes); final int i = searchSnapshot(nameBytes);
if (i >= 0) { if (i >= 0) {
throw new SnapshotException("Failed to add snapshot: there is already a " throw new SnapshotException("Failed to add snapshot: there is already a "
+ "snapshot with the same name \"" + name + "\"."); + "snapshot with the same name \"" + Snapshot.getSnapshotName(s) + "\".");
} }
final DirectoryDiff d = getDiffs().addDiff(s, this); final DirectoryDiff d = getDiffs().addDiff(s, this);

Snapshot.java

@@ -20,7 +20,9 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.text.SimpleDateFormat;
 import java.util.Comparator;
+import java.util.Date;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.Path;
@@ -37,6 +39,30 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
 public class Snapshot implements Comparable<byte[]> {
   public static final int INVALID_ID = -1;
 
+  /**
+   * The pattern for generating the default snapshot name.
+   * E.g. s20130412-151029.033
+   */
+  private static final String DEFAULT_SNAPSHOT_NAME_PATTERN = "'s'yyyyMMdd-HHmmss.SSS";
+
+  public static String generateDefaultSnapshotName() {
+    return new SimpleDateFormat(DEFAULT_SNAPSHOT_NAME_PATTERN).format(new Date());
+  }
+
+  static String getSnapshotPath(String snapshottableDir, String snapshotName) {
+    return new Path(snapshottableDir, HdfsConstants.DOT_SNAPSHOT_DIR
+        + Path.SEPARATOR + snapshotName).toString();
+  }
+
+  /**
+   * Get the name of the given snapshot.
+   * @param s The given snapshot.
+   * @return The name of the snapshot, or an empty string if {@code s} is null
+   */
+  static String getSnapshotName(Snapshot s) {
+    return s != null ? s.getRoot().getLocalName() : "";
+  }
+
   /**
    * Compare snapshot IDs. Null indicates the current status thus is greater
    * than non-null snapshots.
@@ -79,15 +105,6 @@ public class Snapshot implements Comparable<byte[]> {
     return latest;
   }
 
-  /**
-   * Get the name of the given snapshot.
-   * @param s The given snapshot.
-   * @return The name of the snapshot, or an empty string if {@code s} is null
-   */
-  public static String getSnapshotName(Snapshot s) {
-    return s != null ? s.getRoot().getLocalName() : "";
-  }
-
   /** The root directory of the snapshot. */
   public class Root extends INodeDirectory {
     Root(INodeDirectory other) {
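
A standalone sketch of what generateDefaultSnapshotName() produces; the demo class and its printed output are illustrative only, not part of the commit:

import java.text.SimpleDateFormat;
import java.util.Date;

public class DefaultSnapshotNameDemo {
  public static void main(String[] args) {
    // The same pattern as DEFAULT_SNAPSHOT_NAME_PATTERN above: a literal 's'
    // followed by the date, the time, and milliseconds.
    final String name =
        new SimpleDateFormat("'s'yyyyMMdd-HHmmss.SSS").format(new Date());
    System.out.println(name); // e.g. s20130412-151029.033

    // Equivalent to the pattern asserted in TestNestedSnapshots below.
    System.out.println(name.matches("s\\d{8}-\\d{6}\\.\\d{3}")); // true
  }
}

Millisecond precision makes back-to-back default names on the same directory effectively unique, and addSnapshot still rejects an exact duplicate name.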

SnapshotManager.java

@@ -127,19 +127,19 @@ public class SnapshotManager implements SnapshotStats {
    *           snapshot with the given name for the directory, and/or 3)
    *           snapshot number exceeds quota
    */
-  public void createSnapshot(final String path, final String snapshotName
+  public String createSnapshot(final String path, String snapshotName
      ) throws IOException {
     // Find the source root directory path where the snapshot is taken.
     final INodesInPath i = fsdir.getINodesInPath4Write(path);
     final INodeDirectorySnapshottable srcRoot
         = INodeDirectorySnapshottable.valueOf(i.getLastINode(), path);
-    fsdir.verifyMaxComponentLength(snapshotName, path, 0);
 
     srcRoot.addSnapshot(snapshotCounter, snapshotName);
 
     //create success, update id
     snapshotCounter++;
     numSnapshots.getAndIncrement();
+    return Snapshot.getSnapshotPath(path, snapshotName);
   }
 
   /**

ClientNamenodeProtocol.proto

@@ -450,10 +450,11 @@ message GetDataEncryptionKeyResponseProto {
 message CreateSnapshotRequestProto {
   required string snapshotRoot = 1;
-  required string snapshotName = 2;
+  optional string snapshotName = 2;
 }
 
-message CreateSnapshotResponseProto { // void response
+message CreateSnapshotResponseProto {
+  required string snapshotPath = 1;
 }
 
 message RenameSnapshotRequestProto {
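
Two wire-format notes: marking snapshotName optional is what lets the server distinguish "no name supplied" (generate a default) from an explicit name, and replacing the void response with a required snapshotPath is only safe because HDFS-2802 is an unreleased feature branch, so no deployed client expects the old empty response.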

TestNestedSnapshots.java

@@ -21,6 +21,7 @@ import static org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnap
 
 import java.io.IOException;
 import java.util.Random;
+import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -178,7 +179,19 @@ public class TestNestedSnapshots {
     final Path foo = new Path(dir, "foo");
     final Path f1 = new Path(foo, "f1");
     DFSTestUtil.createFile(hdfs, f1, BLOCKSIZE, REPLICATION, SEED);
-    hdfs.createSnapshot(dir, "s0");
+    {
+      //create a snapshot with default snapshot name
+      final Path snapshotPath = hdfs.createSnapshot(dir);
+
+      //check snapshot path and the default snapshot name
+      final String snapshotName = snapshotPath.getName();
+      Assert.assertTrue("snapshotName=" + snapshotName, Pattern.matches(
+          "s\\d\\d\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d\\d\\d\\.\\d\\d\\d",
+          snapshotName));
+      final Path parent = snapshotPath.getParent();
+      Assert.assertEquals(HdfsConstants.DOT_SNAPSHOT_DIR, parent.getName());
+      Assert.assertEquals(dir, parent.getParent());
+    }
 
     final Path f2 = new Path(foo, "f2");
     DFSTestUtil.createFile(hdfs, f2, BLOCKSIZE, REPLICATION, SEED);
@@ -193,7 +206,7 @@ public class TestNestedSnapshots {
 
     try {
       // createSnapshot should fail with quota
-      hdfs.createSnapshot(dir, "s1");
+      hdfs.createSnapshot(dir);
       Assert.fail();
     } catch(RemoteException re) {
       final IOException ioe = re.unwrapRemoteException();