HDFS-4244. Support snapshot deletion. Contributed by Jing Zhao
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1430953 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent
93872b58e5
commit
2d4097840c
@@ -2221,15 +2221,26 @@ public abstract class FileSystem extends Configured implements Closeable {

   /**
    * Create a snapshot
-   * @param snapshotName The name of the snapshot
+   * @param path The directory where snapshots will be taken.
+   * @param snapshotName The name of the snapshot
    */
-  public void createSnapshot(String snapshotName, String path)
+  public void createSnapshot(Path path, String snapshotName)
       throws IOException {
     throw new UnsupportedOperationException(getClass().getSimpleName()
         + " doesn't support createSnapshot");
   }
+
+  /**
+   * Delete a snapshot of a directory
+   * @param path The directory that the to-be-deleted snapshot belongs to
+   * @param snapshotName The name of the snapshot
+   */
+  public void deleteSnapshot(Path path, String snapshotName)
+      throws IOException {
+    throw new UnsupportedOperationException(getClass().getSimpleName()
+        + " doesn't support deleteSnapshot");
+  }

   // making it volatile to be able to do a double checked locking
   private volatile static boolean FILE_SYSTEMS_LOADED = false;

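Editor's note: both methods now take the snapshottable directory first and the snapshot name second. A minimal client-side sketch of the new round trip, assuming a hypothetical hdfs://namenode:8020 cluster and a /data directory that an admin has already allowed snapshots on (the method signatures themselves come straight from the hunk above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SnapshotRoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://namenode:8020"); // hypothetical cluster

    FileSystem fs = FileSystem.get(conf);
    Path dir = new Path("/data");   // assumed already snapshottable

    fs.createSnapshot(dir, "s0");   // new argument order: (path, name)
    // ... changes under /data are invisible to the snapshot ...
    fs.deleteSnapshot(dir, "s0");   // frees blocks referenced only by s0
    fs.close();
  }
}
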
@@ -33,9 +33,11 @@ import org.apache.hadoop.fs.PathIsNotDirectoryException;

 class SnapshotCommands extends FsCommand {
   private final static String CREATE_SNAPSHOT = "createSnapshot";
+  private final static String DELETE_SNAPSHOT = "deleteSnapshot";

   public static void registerCommands(CommandFactory factory) {
     factory.addClass(CreateSnapshot.class, "-" + CREATE_SNAPSHOT);
+    factory.addClass(DeleteSnapshot.class, "-" + DELETE_SNAPSHOT);
   }

   /**
@@ -74,9 +76,49 @@ class SnapshotCommands extends FsCommand {
       }
       assert(items.size() == 1);
       PathData sroot = items.getFirst();
-      String snapshotRoot = sroot.path.toString();
-      sroot.fs.createSnapshot(snapshotName, snapshotRoot);
+      sroot.fs.createSnapshot(sroot.path, snapshotName);
     }
   }
+
+  /**
+   * Delete a snapshot
+   */
+  public static class DeleteSnapshot extends FsCommand {
+    public static final String NAME = DELETE_SNAPSHOT;
+    public static final String USAGE = "<snapshotName> <snapshotDir>";
+    public static final String DESCRIPTION =
+        "Delete a snapshot from a directory";
+
+    private static String snapshotName;
+
+    @Override
+    protected void processPath(PathData item) throws IOException {
+      if (!item.stat.isDirectory()) {
+        throw new PathIsNotDirectoryException(item.toString());
+      }
+    }
+
+    @Override
+    protected void processOptions(LinkedList<String> args) throws IOException {
+      if (args.size() != 2) {
+        throw new IOException("args number not 2: " + args.size());
+      }
+      snapshotName = args.removeFirst();
+      // TODO: name length check
+
+    }
+
+    @Override
+    protected void processArguments(LinkedList<PathData> items)
+        throws IOException {
+      super.processArguments(items);
+      if (exitCode != 0) { // check for error collecting paths
+        return;
+      }
+      assert (items.size() == 1);
+      PathData sroot = items.getFirst();
+      sroot.fs.deleteSnapshot(sroot.path, snapshotName);
+    }
+  }
 }

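Editor's note: with both commands registered, users reach them through the shell as -createSnapshot and -deleteSnapshot, giving the snapshot name before the directory per the USAGE string above. A sketch of driving them programmatically, assuming a hypothetical /data directory; the FsShell/ToolRunner invocation below mirrors running hadoop fs on the command line:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class ShellSnapshotDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Equivalent to: hadoop fs -createSnapshot s0 /data
    int rc = ToolRunner.run(new FsShell(conf),
        new String[] { "-createSnapshot", "s0", "/data" });
    // Equivalent to: hadoop fs -deleteSnapshot s0 /data
    rc = ToolRunner.run(new FsShell(conf),
        new String[] { "-deleteSnapshot", "s0", "/data" });
    System.exit(rc);
  }
}
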
@@ -95,3 +95,5 @@ Branch-2802 Snapshot (Unreleased)

   HDFS-4230. Support listing of all the snapshottable directories. (Jing Zhao
   via szetszwo)
+
+  HDFS-4244. Support snapshot deletion. (Jing Zhao via szetszwo)

@@ -1908,13 +1908,28 @@ public class DFSClient implements java.io.Closeable {
   /**
    * Create one snapshot.
    *
-   * @see ClientProtocol#createSnapshot(String snapshotName, String
-   *      snapshotRoot)
+   * @param snapshotRoot The directory where the snapshot is to be taken
+   * @param snapshotName Name of the snapshot
+   * @see ClientProtocol#createSnapshot(String, String)
    */
-  public void createSnapshot(String snapshotName, String snapshotRoot)
+  public void createSnapshot(String snapshotRoot, String snapshotName)
       throws IOException {
     checkOpen();
-    namenode.createSnapshot(snapshotName, snapshotRoot);
+    namenode.createSnapshot(snapshotRoot, snapshotName);
+  }
+
+  /**
+   * Delete a snapshot of a snapshottable directory.
+   *
+   * @param snapshotRoot The snapshottable directory that the
+   *          to-be-deleted snapshot belongs to
+   * @param snapshotName The name of the to-be-deleted snapshot
+   * @throws IOException
+   * @see ClientProtocol#deleteSnapshot(String, String)
+   */
+  public void deleteSnapshot(String snapshotRoot, String snapshotName)
+      throws IOException {
+    namenode.deleteSnapshot(snapshotRoot, snapshotName);
   }

   /**

@@ -927,9 +927,9 @@ public class DistributedFileSystem extends FileSystem {
   }

   @Override
-  public void createSnapshot(String snapshotName, String path)
+  public void createSnapshot(Path path, String snapshotName)
       throws IOException {
-    dfs.createSnapshot(snapshotName, path);
+    dfs.createSnapshot(getPathName(path), snapshotName);
   }

   /**
@@ -939,9 +939,9 @@ public class DistributedFileSystem extends FileSystem {
    * @param snapshotNewName New name of the snapshot
    * @throws IOException
    */
-  public void renameSnapshot(String path, String snapshotOldName,
+  public void renameSnapshot(Path path, String snapshotOldName,
       String snapshotNewName) throws IOException {
-    dfs.renameSnapshot(path, snapshotOldName, snapshotNewName);
+    dfs.renameSnapshot(getPathName(path), snapshotOldName, snapshotNewName);
   }

   /**
@@ -952,4 +952,10 @@ public class DistributedFileSystem extends FileSystem {
       throws IOException {
     return dfs.getSnapshottableDirListing();
   }
+
+  @Override
+  public void deleteSnapshot(Path snapshotDir, String snapshotName)
+      throws IOException {
+    dfs.deleteSnapshot(getPathName(snapshotDir), snapshotName);
+  }
 }

@@ -980,11 +980,20 @@ public interface ClientProtocol {

   /**
    * Create a snapshot
-   * @param snapshotName name of the snapshot created
+   * @param snapshotRoot the path that is being snapshotted
+   * @param snapshotName name of the snapshot created
    * @throws IOException
    */
-  public void createSnapshot(String snapshotName, String snapshotRoot)
+  public void createSnapshot(String snapshotRoot, String snapshotName)
       throws IOException;
+
+  /**
+   * Delete a specific snapshot of a snapshottable directory
+   * @param snapshotRoot The snapshottable directory
+   * @param snapshotName Name of the snapshot for the snapshottable directory
+   * @throws IOException
+   */
+  public void deleteSnapshot(String snapshotRoot, String snapshotName)
+      throws IOException;

   /**
@@ -1011,6 +1020,6 @@ public interface ClientProtocol {
    * @throws IOException
    */
   public void disallowSnapshot(String snapshotRoot)
-      throws IOException;
+      throws IOException;
 }

@@ -55,6 +55,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Create
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto;
@@ -161,6 +163,8 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   final private ClientProtocol server;
   static final CreateSnapshotResponseProto VOID_CREATE_SNAPSHOT_RESPONSE =
       CreateSnapshotResponseProto.newBuilder().build();
+  static final DeleteSnapshotResponseProto VOID_DELETE_SNAPSHOT_RESPONSE =
+      DeleteSnapshotResponseProto.newBuilder().build();
   static final RenameSnapshotResponseProto VOID_RENAME_SNAPSHOT_RESPONSE =
       RenameSnapshotResponseProto.newBuilder().build();
   static final AllowSnapshotResponseProto VOID_ALLOW_SNAPSHOT_RESPONSE =
@@ -868,14 +872,26 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public CreateSnapshotResponseProto createSnapshot(RpcController controller,
       CreateSnapshotRequestProto request) throws ServiceException {
     try {
-      server.createSnapshot(request.getSnapshotName(),
-          request.getSnapshotRoot());
+      server.createSnapshot(request.getSnapshotRoot(),
+          request.getSnapshotName());
     } catch (IOException e) {
       throw new ServiceException(e);
     }
     return VOID_CREATE_SNAPSHOT_RESPONSE;
   }
+
+  @Override
+  public DeleteSnapshotResponseProto deleteSnapshot(RpcController controller,
+      DeleteSnapshotRequestProto request) throws ServiceException {
+    try {
+      server
+          .deleteSnapshot(request.getSnapshotRoot(), request.getSnapshotName());
+      return VOID_DELETE_SNAPSHOT_RESPONSE;
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }

   @Override
   public AllowSnapshotResponseProto allowSnapshot(RpcController controller,
       AllowSnapshotRequestProto req) throws ServiceException {

@@ -59,6 +59,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Create
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto;
@@ -835,17 +836,29 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }

   @Override
-  public void createSnapshot(String snapshotName, String snapshotRoot)
+  public void createSnapshot(String snapshotRoot, String snapshotName)
       throws IOException {
     CreateSnapshotRequestProto req = CreateSnapshotRequestProto.newBuilder()
-        .setSnapshotName(snapshotName).setSnapshotRoot(snapshotRoot).build();
+        .setSnapshotRoot(snapshotRoot).setSnapshotName(snapshotName).build();
     try {
       rpcProxy.createSnapshot(null, req);
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
   }
+
+  @Override
+  public void deleteSnapshot(String snapshotRoot, String snapshotName)
+      throws IOException {
+    DeleteSnapshotRequestProto req = DeleteSnapshotRequestProto.newBuilder()
+        .setSnapshotRoot(snapshotRoot).setSnapshotName(snapshotName).build();
+    try {
+      rpcProxy.deleteSnapshot(null, req);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }

   @Override
   public void allowSnapshot(String snapshotRoot) throws IOException {
     AllowSnapshotRequestProto req = AllowSnapshotRequestProto.newBuilder()

@@ -868,15 +868,15 @@ public class FSEditLog implements LogsPurgeable {
     logEdit(op);
   }

-  void logCreateSnapshot(String snapName, String snapRoot) {
+  void logCreateSnapshot(String snapRoot, String snapName) {
     CreateSnapshotOp op = CreateSnapshotOp.getInstance(cache.get())
-        .setSnapshotName(snapName).setSnapshotRoot(snapRoot);
+        .setSnapshotRoot(snapRoot).setSnapshotName(snapName);
     logEdit(op);
   }

-  void logDeleteSnapshot(String snapName, String snapRoot) {
+  void logDeleteSnapshot(String snapRoot, String snapName) {
     DeleteSnapshotOp op = DeleteSnapshotOp.getInstance(cache.get())
-        .setSnapshotName(snapName).setSnapshotRoot(snapRoot);
+        .setSnapshotRoot(snapRoot).setSnapshotName(snapName);
     logEdit(op);
   }

@@ -2170,8 +2170,8 @@ public abstract class FSEditLogOp {
    * Operation corresponding to creating a snapshot
    */
   static class CreateSnapshotOp extends FSEditLogOp {
-    String snapshotName;
     String snapshotRoot;
+    String snapshotName;

     public CreateSnapshotOp() {
       super(OP_CREATE_SNAPSHOT);
@@ -2193,35 +2193,35 @@ public abstract class FSEditLogOp {

     @Override
     void readFields(DataInputStream in, int logVersion) throws IOException {
-      snapshotName = FSImageSerialization.readString(in);
       snapshotRoot = FSImageSerialization.readString(in);
+      snapshotName = FSImageSerialization.readString(in);
     }

     @Override
     public void writeFields(DataOutputStream out) throws IOException {
-      FSImageSerialization.writeString(snapshotName, out);
       FSImageSerialization.writeString(snapshotRoot, out);
+      FSImageSerialization.writeString(snapshotName, out);
     }

     @Override
     protected void toXml(ContentHandler contentHandler) throws SAXException {
-      XMLUtils.addSaxString(contentHandler, "SNAPSHOTNAME", snapshotName);
       XMLUtils.addSaxString(contentHandler, "SNAPSHOTROOT", snapshotRoot);
+      XMLUtils.addSaxString(contentHandler, "SNAPSHOTNAME", snapshotName);
     }

     @Override
     void fromXml(Stanza st) throws InvalidXmlException {
-      snapshotName = st.getValue("SNAPSHOTNAME");
       snapshotRoot = st.getValue("SNAPSHOTROOT");
+      snapshotName = st.getValue("SNAPSHOTNAME");
     }

     @Override
     public String toString() {
       StringBuilder builder = new StringBuilder();
-      builder.append("CreateSnapshotOp [snapshotName=");
-      builder.append(snapshotName);
-      builder.append(", snapshotRoot=");
+      builder.append("CreateSnapshotOp [snapshotRoot=");
       builder.append(snapshotRoot);
+      builder.append(", snapshotName=");
+      builder.append(snapshotName);
       builder.append("]");
       return builder.toString();
     }
@@ -2231,8 +2231,8 @@ public abstract class FSEditLogOp {
    * Operation corresponding to delete a snapshot
    */
   static class DeleteSnapshotOp extends FSEditLogOp {
-    String snapshotName;
     String snapshotRoot;
+    String snapshotName;

     DeleteSnapshotOp() {
       super(OP_DELETE_SNAPSHOT);
@@ -2254,35 +2254,35 @@ public abstract class FSEditLogOp {

     @Override
     void readFields(DataInputStream in, int logVersion) throws IOException {
-      snapshotName = FSImageSerialization.readString(in);
       snapshotRoot = FSImageSerialization.readString(in);
+      snapshotName = FSImageSerialization.readString(in);
     }

     @Override
     public void writeFields(DataOutputStream out) throws IOException {
-      FSImageSerialization.writeString(snapshotName, out);
       FSImageSerialization.writeString(snapshotRoot, out);
+      FSImageSerialization.writeString(snapshotName, out);
     }

     @Override
     protected void toXml(ContentHandler contentHandler) throws SAXException {
-      XMLUtils.addSaxString(contentHandler, "SNAPSHOTNAME", snapshotName);
       XMLUtils.addSaxString(contentHandler, "SNAPSHOTROOT", snapshotRoot);
+      XMLUtils.addSaxString(contentHandler, "SNAPSHOTNAME", snapshotName);
     }

     @Override
     void fromXml(Stanza st) throws InvalidXmlException {
-      snapshotName = st.getValue("SNAPSHOTNAME");
       snapshotRoot = st.getValue("SNAPSHOTROOT");
+      snapshotName = st.getValue("SNAPSHOTNAME");
     }

     @Override
     public String toString() {
       StringBuilder builder = new StringBuilder();
-      builder.append("DeleteSnapshotOp [snapshotName=");
-      builder.append(snapshotName);
-      builder.append(", snapshotRoot=");
+      builder.append("DeleteSnapshotOp [snapshotRoot=");
       builder.append(snapshotRoot);
+      builder.append(", snapshotName=");
+      builder.append(snapshotName);
       builder.append("]");
       return builder.toString();
     }

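Editor's note: the point of the reordering above is that writeFields and readFields (and their XML counterparts) must emit and consume snapshotRoot and snapshotName in exactly the same order, or edit-log replay silently swaps the two strings. A standalone sketch of that invariant using plain java.io streams rather than FSImageSerialization:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class FieldOrderDemo {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    // Writer emits root first, then name (mirrors the new op layout).
    out.writeUTF("/data");
    out.writeUTF("s0");

    DataInputStream in = new DataInputStream(
        new ByteArrayInputStream(buf.toByteArray()));
    // Reader must consume in the same order, or the fields swap silently.
    String snapshotRoot = in.readUTF();
    String snapshotName = in.readUTF();
    System.out.println(snapshotRoot + " @ " + snapshotName); // /data @ s0
  }
}
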
@@ -5680,37 +5680,37 @@ public class FSNamesystem implements Namesystem, FSClusterStats,

   /**
    * Create a snapshot
+   * @param snapshotRoot The directory path where the snapshot is taken
    * @param snapshotName The name of the snapshot
-   * @param path The directory path where the snapshot is taken
    */
-  public void createSnapshot(String snapshotName, String path)
+  public void createSnapshot(String snapshotRoot, String snapshotName)
       throws SafeModeException, IOException {
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       if (isInSafeMode()) {
-        throw new SafeModeException("Cannot create snapshot for " + path,
-            safeMode);
+        throw new SafeModeException("Cannot create snapshot for "
+            + snapshotRoot, safeMode);
       }
-      checkOwner(path);
+      checkOwner(snapshotRoot);

       dir.writeLock();
       try {
-        snapshotManager.createSnapshot(snapshotName, path);
+        snapshotManager.createSnapshot(snapshotRoot, snapshotName);
       } finally {
         dir.writeUnlock();
       }
-      getEditLog().logCreateSnapshot(snapshotName, path);
+      getEditLog().logCreateSnapshot(snapshotRoot, snapshotName);
     } finally {
       writeUnlock();
     }
     getEditLog().logSync();

     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
-      Path snapshotRoot = new Path(path, HdfsConstants.DOT_SNAPSHOT_DIR + "/"
-          + snapshotName);
+      Path rootPath = new Path(snapshotRoot, HdfsConstants.DOT_SNAPSHOT_DIR
+          + Path.SEPARATOR + snapshotName);
       logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(),
-          "createSnapshot", path, snapshotRoot.toString(), null);
+          "createSnapshot", snapshotRoot, rootPath.toString(), null);
     }
   }

@@ -5775,6 +5775,48 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       readUnlock();
     }
   }
+
+  /**
+   * Delete a snapshot of a snapshottable directory
+   * @param snapshotRoot The snapshottable directory
+   * @param snapshotName The name of the to-be-deleted snapshot
+   * @throws SafeModeException
+   * @throws IOException
+   */
+  public void deleteSnapshot(String snapshotRoot, String snapshotName)
+      throws SafeModeException, IOException {
+    writeLock();
+    try {
+      checkOperation(OperationCategory.WRITE);
+      if (isInSafeMode()) {
+        throw new SafeModeException(
+            "Cannot delete snapshot for " + snapshotRoot, safeMode);
+      }
+      checkOwner(snapshotRoot);
+
+      BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
+      dir.writeLock();
+      try {
+        snapshotManager.deleteSnapshot(snapshotRoot, snapshotName,
+            collectedBlocks);
+      } finally {
+        dir.writeUnlock();
+      }
+      this.removeBlocks(collectedBlocks);
+      collectedBlocks.clear();
+      getEditLog().logDeleteSnapshot(snapshotRoot, snapshotName);
+    } finally {
+      writeUnlock();
+    }
+    getEditLog().logSync();
+
+    if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+      Path rootPath = new Path(snapshotRoot, HdfsConstants.DOT_SNAPSHOT_DIR
+          + Path.SEPARATOR + snapshotName);
+      logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(),
+          "deleteSnapshot", rootPath.toString(), null, null);
+    }
+  }

   /**
    * Remove a list of INodeDirectorySnapshottable from the SnapshotManager

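Editor's note: deleteSnapshot follows a collect-then-apply pattern: block removals are only recorded into a BlocksMapUpdateInfo while the directory lock is held, and removeBlocks applies them afterwards, still under the namesystem write lock and before the edit is logged. A simplified sketch of the pattern with hypothetical stand-in types (the real types are INode.BlocksMapUpdateInfo and the blocks map in the block manager):

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Hypothetical stand-in: block IDs instead of Block objects.
class CollectThenApply {
  private final Set<Long> blocksMap = new HashSet<>();

  void deleteSnapshot(List<Long> blocksOnlyInSnapshot) {
    List<Long> collected = new ArrayList<>();
    // Phase 1 (under the directory lock in FSNamesystem): the snapshot
    // manager only *records* the to-be-freed blocks.
    collected.addAll(blocksOnlyInSnapshot);
    // Phase 2 (directory lock released, namesystem write lock still held):
    // apply the removals in one pass, then log the edit.
    for (Long b : collected) {
      blocksMap.remove(b);
    }
    collected.clear(); // mirrors collectedBlocks.clear()
  }
}
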
@@ -35,7 +35,6 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.util.StringUtils;

@@ -1086,14 +1086,21 @@ class NameNodeRpcServer implements NamenodeProtocols {
   }

   @Override
-  public void createSnapshot(String snapshotName, String snapshotRoot)
+  public void createSnapshot(String snapshotRoot, String snapshotName)
       throws IOException {
     if (!checkPathLength(snapshotRoot)) {
       throw new IOException("createSnapshot: Pathname too long. Limit "
           + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
     }
     metrics.incrCreateSnapshotOps();
-    namesystem.createSnapshot(snapshotName, snapshotRoot);
+    namesystem.createSnapshot(snapshotRoot, snapshotName);
+  }
+
+  @Override
+  public void deleteSnapshot(String snapshotRoot, String snapshotName)
+      throws IOException {
+    metrics.incrDeleteSnapshotOps();
+    namesystem.deleteSnapshot(snapshotRoot, snapshotName);
   }

   @Override

@@ -63,6 +63,8 @@ public class NameNodeMetrics {
   MutableCounterLong disallowSnapshotOps;
   @Metric("Number of createSnapshot operations")
   MutableCounterLong createSnapshotOps;
+  @Metric("Number of deleteSnapshot operations")
+  MutableCounterLong deleteSnapshotOps;
   @Metric("Number of renameSnapshot operations")
   MutableCounterLong renameSnapshotOps;
   @Metric("Number of listSnapshottableDirectory operations")
@@ -181,6 +183,10 @@ public class NameNodeMetrics {
     createSnapshotOps.incr();
   }

+  public void incrDeleteSnapshotOps() {
+    deleteSnapshotOps.incr();
+  }
+
   public void incrRenameSnapshotOps() {
     renameSnapshotOps.incr();
   }

@@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.util.Time;

 import com.google.common.annotations.VisibleForTesting;
@@ -178,6 +179,48 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
     updateModificationTime(timestamp, null);
     return s;
   }
+
+  /**
+   * Remove the snapshot with the given name from {@link #snapshotsByNames},
+   * and delete all the corresponding SnapshotDiff.
+   *
+   * @param snapshotName The name of the snapshot to be removed
+   * @param collectedBlocks Used to collect information to update blocksMap
+   * @return The removed snapshot. Null if no snapshot with the given name
+   *         exists.
+   */
+  Snapshot removeSnapshot(String snapshotName,
+      BlocksMapUpdateInfo collectedBlocks) throws SnapshotException {
+    final int indexOfOld = searchSnapshot(DFSUtil.string2Bytes(snapshotName));
+    if (indexOfOld < 0) {
+      throw new SnapshotException("Cannot delete snapshot " + snapshotName
+          + " from path " + this.getFullPathName()
+          + ": the snapshot does not exist.");
+    } else {
+      Snapshot snapshot = snapshotsByNames.remove(indexOfOld);
+      deleteDiffsForSnapshot(snapshot, this, collectedBlocks);
+      return snapshot;
+    }
+  }
+
+  /**
+   * Recursively delete SnapshotDiff associated with the given snapshot under a
+   * directory
+   */
+  private void deleteDiffsForSnapshot(Snapshot snapshot, INodeDirectory dir,
+      BlocksMapUpdateInfo collectedBlocks) {
+    if (dir instanceof INodeDirectoryWithSnapshot) {
+      INodeDirectoryWithSnapshot sdir = (INodeDirectoryWithSnapshot) dir;
+      sdir.deleteSnapshotDiff(snapshot, collectedBlocks);
+    }
+    ReadOnlyList<INode> children = dir.getChildrenList(null);
+    for (INode child : children) {
+      if (child instanceof INodeDirectory) {
+        deleteDiffsForSnapshot(snapshot, (INodeDirectory) child,
+            collectedBlocks);
+      }
+    }
+  }

   /**
    * Replace itself with {@link INodeDirectoryWithSnapshot} or

@@ -40,7 +40,8 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
   /**
    * The difference between the current state and a previous snapshot
    * of an INodeDirectory.
-   *
+   *
+   * <pre>
    * Two lists are maintained in the algorithm:
    * - c-list for newly created inodes
    * - d-list for the deleted inodes
@@ -75,6 +76,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
    * 2.3.1. modify i in current and then create: impossible
    * 2.3.2. modify i in current and then delete: remove it from c-list (0, d)
    * 2.3.3. modify i in current and then modify: replace it in c-list (c, d)
+   * </pre>
    */
   static class Diff {
     /**
@@ -289,6 +291,85 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
     List<INode> apply2Current(final List<INode> current) {
       return apply2Previous(current, deleted, created);
     }
+
+    /**
+     * Combine the posterior diff with this diff. This function needs to be
+     * called before the posterior diff is to be deleted. In general we have:
+     *
+     * <pre>
+     * 1. For (c, 0) in the posterior diff, check the inode in this diff:
+     * 1.1 (c', 0) in this diff: impossible
+     * 1.2 (0, d') in this diff: put in created --> (c, d')
+     * 1.3 (c', d') in this diff: impossible
+     * 1.4 (0, 0) in this diff: put in created --> (c, 0)
+     * This is the same logic with {@link #create(INode)}.
+     *
+     * 2. For (0, d) in the posterior diff,
+     * 2.1 (c', 0) in this diff: remove from old created --> (0, 0)
+     * 2.2 (0, d') in this diff: impossible
+     * 2.3 (c', d') in this diff: remove from old created --> (0, d')
+     * 2.4 (0, 0) in this diff: put in deleted --> (0, d)
+     * This is the same logic with {@link #delete(INode)}.
+     *
+     * 3. For (c, d) in the posterior diff,
+     * 3.1 (c', 0) in this diff: replace old created --> (c, 0)
+     * 3.2 (0, d') in this diff: impossible
+     * 3.3 (c', d') in this diff: replace old created --> (c, d')
+     * 3.4 (0, 0) in this diff: put in created and deleted --> (c, d)
+     * This is the same logic with {@link #modify(INode, INode)}.
+     * </pre>
+     *
+     * Note that after this function the postDiff will be deleted.
+     *
+     * @param postDiff The posterior diff to combine
+     * @param collectedBlocks Used in case 2.3, 3.1, and 3.3 to collect
+     *          information for blocksMap update
+     */
+    void combinePostDiff(Diff postDiff, BlocksMapUpdateInfo collectedBlocks) {
+      while (postDiff.created != null && !postDiff.created.isEmpty()) {
+        INode node = postDiff.created.remove(postDiff.created.size() - 1);
+        int deletedIndex = search(postDiff.deleted, node);
+        if (deletedIndex < 0) {
+          // for case 1
+          create(node);
+        } else {
+          // case 3
+          int createdIndex = search(created, node);
+          if (createdIndex < 0) {
+            // 3.4
+            create(node);
+            insertDeleted(node, search(deleted, node));
+          } else {
+            // 3.1 and 3.3
+            created.set(createdIndex, node);
+            // for 3.1 and 3.3, if the node is an INodeFileWithLink, need to
+            // remove d in the posterior diff from the circular list, also do
+            // possible block deletion and blocksMap updating
+            INode dInPost = postDiff.deleted.get(deletedIndex);
+            if (dInPost instanceof INodeFileWithLink) {
+              // dInPost must be an INodeFileWithLink
+              ((INodeFileWithLink) dInPost)
+                  .collectSubtreeBlocksAndClear(collectedBlocks);
+            }
+          }
+          // also remove the inode from the deleted list
+          postDiff.deleted.remove(deletedIndex);
+        }
+      }
+
+      while (postDiff.deleted != null && !postDiff.deleted.isEmpty()) {
+        // case 2
+        INode node = postDiff.deleted.remove(postDiff.deleted.size() - 1);
+        Triple<Integer, INode, Integer> triple = delete(node);
+        // for 2.3, if the node is an INodeFileWithLink, need to remove c' from
+        // the circular list
+        INode cInCurrent = triple.middle;
+        if (cInCurrent instanceof INodeFileWithLink) {
+          ((INodeFileWithLink) cInCurrent)
+              .collectSubtreeBlocksAndClear(collectedBlocks);
+        }
+      }
+    }

     /** Convert the inode list to a compact string. */
     static String toString(List<INode> inodes) {

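Editor's note: the case analysis in combinePostDiff is easier to see on a toy model. The sketch below replaces inodes with plain strings and drops the circular-link and blocksMap bookkeeping, so it illustrates only the (c, d) list bookkeeping, not the real implementation:

import java.util.ArrayList;
import java.util.List;

// Toy model: a diff is a (created, deleted) pair of name lists.
class ToyDiff {
  final List<String> created = new ArrayList<>();
  final List<String> deleted = new ArrayList<>();

  void combinePostDiff(ToyDiff post) {
    while (!post.created.isEmpty()) {
      String node = post.created.remove(post.created.size() - 1);
      int dIdx = post.deleted.indexOf(node);
      if (dIdx < 0) {
        // case 1: created in post; it lands in this.created (1.2 / 1.4),
        // and any existing deleted entry here is kept --> (c, d') or (c, 0)
        created.add(node);
      } else {
        // case 3: modified in post
        post.deleted.remove(dIdx);
        if (!created.contains(node)) {
          created.add(node);   // 3.4: record both sides --> (c, d)
          deleted.add(node);
        }
        // 3.1 / 3.3: the created entry would be replaced by the newer
        // node; with plain strings that replacement is a no-op
      }
    }
    while (!post.deleted.isEmpty()) {
      // case 2: deleted in post
      String node = post.deleted.remove(post.deleted.size() - 1);
      if (created.remove(node)) {
        // 2.1 / 2.3: drop the created entry; a deleted entry, if any, stays
      } else if (!deleted.contains(node)) {
        deleted.add(node);     // 2.4: pure deletion --> (0, d)
      }
    }
  }
}
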
@@ -481,6 +562,37 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
     super(that, adopt, that.getNsQuota(), that.getDsQuota());
     this.diffs = diffs != null? diffs: new ArrayList<SnapshotDiff>();
   }
+
+  /**
+   * Delete the snapshot with the given name. The synchronization of the diff
+   * list will be done outside.
+   *
+   * If the diff to remove is not the first one in the diff list, we need to
+   * combine the diff with its previous one:
+   *
+   * @param snapshot The snapshot to be deleted
+   * @param collectedBlocks Used to collect information for blocksMap update
+   * @return The SnapshotDiff containing the deleted snapshot.
+   *         Null if the snapshot with the given name does not exist.
+   */
+  SnapshotDiff deleteSnapshotDiff(Snapshot snapshot,
+      BlocksMapUpdateInfo collectedBlocks) {
+    int snapshotIndex = Collections.binarySearch(diffs, snapshot);
+    if (snapshotIndex == -1) {
+      return null;
+    } else {
+      SnapshotDiff diffToRemove = null;
+      diffToRemove = diffs.remove(snapshotIndex);
+      if (snapshotIndex > 0) {
+        // combine the to-be-removed diff with its previous diff
+        SnapshotDiff previousDiff = diffs.get(snapshotIndex - 1);
+        previousDiff.diff.combinePostDiff(diffToRemove.diff, collectedBlocks);
+        previousDiff.posteriorDiff = diffToRemove.posteriorDiff;
+        diffToRemove.posteriorDiff = null;
+      }
+      return diffToRemove;
+    }
+  }

   /** Add a {@link SnapshotDiff} for the given snapshot and directory. */
   SnapshotDiff addSnapshotDiff(Snapshot snapshot, INodeDirectory dir,

@@ -82,7 +82,7 @@ public class INodeFileWithLink extends INodeFile {
    * any other inode, collect them and update the block list.
    */
   @Override
-  protected int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info) {
+  public int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info) {
     if (next == this) {
       // this is the only remaining inode.
       super.collectSubtreeBlocksAndClear(info);

@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;

@@ -100,17 +101,17 @@ public class SnapshotManager implements SnapshotStats {

   /**
    * Create a snapshot of the given path.
-   * @param snapshotName
-   *          The name of the snapshot.
    * @param path
    *          The directory path where the snapshot will be taken.
+   * @param snapshotName
+   *          The name of the snapshot.
    * @throws IOException
    *           Throw IOException when 1) the given path does not lead to an
    *           existing snapshottable directory, and/or 2) there exists a
    *           snapshot with the given name for the directory, and/or 3)
    *           snapshot number exceeds quota
    */
-  public void createSnapshot(final String snapshotName, final String path
+  public void createSnapshot(final String path, final String snapshotName
       ) throws IOException {
     // Find the source root directory path where the snapshot is taken.
     final INodesInPath i = fsdir.getMutableINodesInPath(path);
@@ -122,6 +123,27 @@ public class SnapshotManager implements SnapshotStats {
     snapshotID++;
     numSnapshots.getAndIncrement();
   }
+
+  /**
+   * Delete a snapshot for a snapshottable directory
+   * @param path Path to the directory where the snapshot was taken
+   * @param snapshotName Name of the snapshot to be deleted
+   * @param collectedBlocks Used to collect information to update blocksMap
+   * @throws IOException
+   */
+  public void deleteSnapshot(final String path, final String snapshotName,
+      BlocksMapUpdateInfo collectedBlocks) throws IOException {
+    // parse the path, and check if the path is a snapshot path
+    INodesInPath inodesInPath = fsdir.getMutableINodesInPath(path.toString());
+    // transfer the inode for path to an INodeDirectorySnapshottable.
+    // the INodeDirectorySnapshottable#valueOf method will throw Exception
+    // if the path is not for a snapshottable directory
+    INodeDirectorySnapshottable dir = INodeDirectorySnapshottable.valueOf(
+        inodesInPath.getLastINode(), path.toString());
+
+    dir.removeSnapshot(snapshotName, collectedBlocks);
+    numSnapshots.getAndDecrement();
+  }

   /**
    * Rename the given snapshot

@@ -452,8 +452,8 @@ message GetDataEncryptionKeyResponseProto {
 }

 message CreateSnapshotRequestProto {
-  required string snapshotName = 1;
-  required string snapshotRoot = 2;
+  required string snapshotRoot = 1;
+  required string snapshotName = 2;
 }

 message CreateSnapshotResponseProto { // void response
@@ -482,6 +482,14 @@ message DisallowSnapshotRequestProto {
 message DisallowSnapshotResponseProto {
 }

+message DeleteSnapshotRequestProto {
+  required string snapshotRoot = 1;
+  required string snapshotName = 2;
+}
+
+message DeleteSnapshotResponseProto { // void response
+}
+
 service ClientNamenodeProtocol {
   rpc getBlockLocations(GetBlockLocationsRequestProto)
       returns(GetBlockLocationsResponseProto);
@@ -565,4 +573,6 @@ service ClientNamenodeProtocol {
       returns(DisallowSnapshotResponseProto);
   rpc getSnapshottableDirListing(GetSnapshottableDirListingRequestProto)
       returns(GetSnapshottableDirListingResponseProto);
+  rpc deleteSnapshot(DeleteSnapshotRequestProto)
+      returns(DeleteSnapshotResponseProto);
 }

@@ -184,7 +184,7 @@ public class TestSnapshotPathINodes {
     // Create a snapshot for the dir, and check the inodes for the path
     // pointing to a snapshot file
     hdfs.allowSnapshot(sub1.toString());
-    hdfs.createSnapshot("s1", sub1.toString());
+    hdfs.createSnapshot(sub1, "s1");
     // The path when accessing the snapshot file of file1 is
     // /TestSnapshot/sub1/.snapshot/s1/file1
     String snapshotPath = sub1.toString() + "/.snapshot/s1/file1";
@@ -251,7 +251,7 @@ public class TestSnapshotPathINodes {
     // Create a snapshot for the dir, and check the inodes for the path
     // pointing to a snapshot file
     hdfs.allowSnapshot(sub1.toString());
-    hdfs.createSnapshot("s2", sub1.toString());
+    hdfs.createSnapshot(sub1, "s2");

     // Delete the original file /TestSnapshot/sub1/file1
     hdfs.delete(file1, false);
@@ -310,7 +310,7 @@ public class TestSnapshotPathINodes {
     // Create a snapshot for the dir, and check the inodes for the path
     // pointing to a snapshot file
     hdfs.allowSnapshot(sub1.toString());
-    hdfs.createSnapshot("s4", sub1.toString());
+    hdfs.createSnapshot(sub1, "s4");

     // Add a new file /TestSnapshot/sub1/file3
     final Path file3 = new Path(sub1, "file3");
@@ -387,7 +387,7 @@ public class TestSnapshotPathINodes {
     // Create a snapshot for the dir, and check the inodes for the path
     // pointing to a snapshot file
     hdfs.allowSnapshot(sub1.toString());
-    hdfs.createSnapshot("s3", sub1.toString());
+    hdfs.createSnapshot(sub1, "s3");

     // Modify file1
     DFSTestUtil.appendFile(hdfs, file1, "the content for appending");

@@ -62,16 +62,16 @@ public class SnapshotTestHelper {
    * Create snapshot for a dir using a given snapshot name
    *
    * @param hdfs DistributedFileSystem instance
-   * @param snapshottedDir The dir to be snapshotted
+   * @param snapshotRoot The dir to be snapshotted
    * @param snapshotName The name of the snapshot
    * @return The path of the snapshot root
    */
   public static Path createSnapshot(DistributedFileSystem hdfs,
-      Path snapshottedDir, String snapshotName) throws Exception {
-    assertTrue(hdfs.exists(snapshottedDir));
-    hdfs.allowSnapshot(snapshottedDir.toString());
-    hdfs.createSnapshot(snapshotName, snapshottedDir.toString());
-    return SnapshotTestHelper.getSnapshotRoot(snapshottedDir, snapshotName);
+      Path snapshotRoot, String snapshotName) throws Exception {
+    assertTrue(hdfs.exists(snapshotRoot));
+    hdfs.allowSnapshot(snapshotRoot.toString());
+    hdfs.createSnapshot(snapshotRoot, snapshotName);
+    return SnapshotTestHelper.getSnapshotRoot(snapshotRoot, snapshotName);
   }

   /**

@@ -105,14 +105,14 @@ public class TestNestedSnapshots {
     final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, s1name);
     hdfs.allowSnapshot(foo.toString());
     print("allow snapshot " + foo);
-    hdfs.createSnapshot(s1name, foo.toString());
+    hdfs.createSnapshot(foo, s1name);
     print("create snapshot " + s1name);

     final String s2name = "bar-s2";
     final Path s2path = SnapshotTestHelper.getSnapshotRoot(bar, s2name);
     hdfs.allowSnapshot(bar.toString());
     print("allow snapshot " + bar);
-    hdfs.createSnapshot(s2name, bar.toString());
+    hdfs.createSnapshot(bar, s2name);
     print("create snapshot " + s2name);

     final Path file2 = new Path(bar, "file2");
@@ -153,7 +153,7 @@ public class TestNestedSnapshots {
     int s = 0;
     for(; s < SNAPSHOT_LIMIT; s++) {
       final String snapshotName = "s" + s;
-      hdfs.createSnapshot(snapshotName, dirStr);
+      hdfs.createSnapshot(dir, snapshotName);

       //create a file occasionally
       if (s % step == 0) {
@@ -163,7 +163,7 @@ public class TestNestedSnapshots {
     }

     try {
-      hdfs.createSnapshot("s" + s, dirStr);
+      hdfs.createSnapshot(dir, "s" + s);
       Assert.fail("Expected to fail to create snapshot, but didn't.");
     } catch(IOException ioe) {
       SnapshotTestHelper.LOG.info("The exception is expected.", ioe);

@@ -203,7 +203,7 @@ public class TestSnapshot {
     FileStatus oldStatus = hdfs.getFileStatus(sub);

     hdfs.allowSnapshot(dir.toString());
-    hdfs.createSnapshot("s1", dir.toString());
+    hdfs.createSnapshot(dir, "s1");
     hdfs.setTimes(sub, 100L, 100L);

     Path snapshotPath = SnapshotTestHelper.getSnapshotPath(dir, "s1", "sub");
@@ -230,7 +230,7 @@ public class TestSnapshot {
     exception.expect(RemoteException.class);
     exception.expectMessage("Directory is not a snapshottable directory: "
         + dir.toString());
-    hdfs.createSnapshot("s1", dir.toString());
+    hdfs.createSnapshot(dir, "s1");
   }

   /**

@@ -21,6 +21,9 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -34,6 +37,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -129,7 +133,7 @@ public class TestSnapshotBlocksMap {
         snapshotFile0.toString());
     BlockInfo[] ssBlocks = ssINode0.getBlocks();
     // The snapshot of file1 should contain 1 block
-    assertEquals(ssBlocks.length, 1);
+    assertEquals(1, ssBlocks.length);

     // Delete file0
     hdfs.delete(file0, true);
@@ -146,7 +150,25 @@ public class TestSnapshotBlocksMap {
     INodeFile ssINode1 = INodeFile.valueOf(
         dir.getINode(snapshot1File0.toString()), snapshot1File0.toString());
     assertTrue(bcAfterDeletion == ssINode0 || bcAfterDeletion == ssINode1);
-    assertEquals(bcAfterDeletion.getBlocks().length, 1);
+    assertEquals(1, bcAfterDeletion.getBlocks().length);
+
+    // Delete snapshot s1
+    hdfs.deleteSnapshot(sub1, "s1");
+    // Make sure the first block of file0 is still in blocksMap
+    BlockInfo blockInfoAfterSnapshotDeletion = bm.getStoredBlock(blocks[0]);
+    assertNotNull(blockInfoAfterSnapshotDeletion);
+    BlockCollection bcAfterSnapshotDeletion = blockInfoAfterSnapshotDeletion
+        .getBlockCollection();
+    assertTrue(bcAfterSnapshotDeletion == ssINode0);
+    assertEquals(1, bcAfterSnapshotDeletion.getBlocks().length);
+    try {
+      ssINode1 = INodeFile.valueOf(
+          dir.getINode(snapshot1File0.toString()), snapshot1File0.toString());
+      fail("Expect FileNotFoundException when identifying the INode in a deleted Snapshot");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("File does not exist: "
+          + snapshot1File0.toString(), e);
+    }
   }

   // TODO: test for deletion file which was appended after taking snapshots

@@ -17,13 +17,20 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;

+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.fail;
+
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
@@ -55,7 +62,7 @@ public class TestSnapshotDeletion {
   public void setUp() throws Exception {
     conf = new Configuration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
-        .build();
+        .format(true).build();
     cluster.waitActive();

     fsn = cluster.getNamesystem();
@@ -81,7 +88,7 @@ public class TestSnapshotDeletion {

     // Allow snapshot for sub1, and create snapshot for it
     hdfs.allowSnapshot(sub1.toString());
-    hdfs.createSnapshot("s1", sub1.toString());
+    hdfs.createSnapshot(sub1, "s1");

     // Deleting a snapshottable dir with snapshots should fail
     exception.expect(RemoteException.class);
@@ -109,7 +116,7 @@ public class TestSnapshotDeletion {

     // Allow snapshot for subsub1, and create snapshot for it
     hdfs.allowSnapshot(subsub1.toString());
-    hdfs.createSnapshot("s1", subsub1.toString());
+    hdfs.createSnapshot(subsub1, "s1");

     // Deleting dir while its descendant subsub1 having snapshots should fail
     exception.expect(RemoteException.class);
@@ -119,4 +126,190 @@ public class TestSnapshotDeletion {
     exception.expectMessage(error);
     hdfs.delete(dir, true);
   }
+
+  /**
+   * Test deleting the oldest (first) snapshot. We do not need to combine
+   * snapshot diffs in this simplest scenario.
+   */
+  @Test
+  public void testDeleteOldestSnapshot() throws Exception {
+    // create files under sub1
+    Path file0 = new Path(sub1, "file0");
+    Path file1 = new Path(sub1, "file1");
+    DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
+
+    String snapshotName = "s1";
+    try {
+      hdfs.deleteSnapshot(sub1, snapshotName);
+      fail("SnapshotException expected: " + sub1.toString()
+          + " is not snapshottable yet");
+    } catch (Exception e) {
+      GenericTestUtils.assertExceptionContains(
+          "Directory is not a snapshottable directory: " + sub1, e);
+    }
+
+    // make sub1 snapshottable
+    hdfs.allowSnapshot(sub1.toString());
+    try {
+      hdfs.deleteSnapshot(sub1, snapshotName);
+      fail("SnapshotException expected: snapshot " + snapshotName
+          + " does not exist for " + sub1.toString());
+    } catch (Exception e) {
+      GenericTestUtils.assertExceptionContains("Cannot delete snapshot "
+          + snapshotName + " from path " + sub1.toString()
+          + ": the snapshot does not exist.", e);
+    }
+
+    // create snapshot s1 for sub1
+    hdfs.createSnapshot(sub1, snapshotName);
+    // delete s1
+    hdfs.deleteSnapshot(sub1, snapshotName);
+    // now we can create a snapshot with the same name
+    hdfs.createSnapshot(sub1, snapshotName);
+
+    // create a new file under sub1
+    Path newFile = new Path(sub1, "newFile");
+    DFSTestUtil.createFile(hdfs, newFile, BLOCKSIZE, REPLICATION, seed);
+    // create another snapshot s2
+    String snapshotName2 = "s2";
+    hdfs.createSnapshot(sub1, snapshotName2);
+    // Get the filestatus of sub1 under snapshot s2
+    Path ss = SnapshotTestHelper
+        .getSnapshotPath(sub1, snapshotName2, "newFile");
+    FileStatus statusBeforeDeletion = hdfs.getFileStatus(ss);
+    // delete s1
+    hdfs.deleteSnapshot(sub1, snapshotName);
+    FileStatus statusAfterDeletion = hdfs.getFileStatus(ss);
+    assertEquals(statusBeforeDeletion.toString(),
+        statusAfterDeletion.toString());
+  }
+
+  /**
+   * Test deleting snapshots in a more complicated scenario: need to combine
+   * snapshot diffs, but no need to handle diffs distributed in a dir tree
+   */
+  @Test
+  public void testDeleteSnapshot1() throws Exception {
+    testDeleteSnapshot(sub1, "");
+  }
+
+  /**
+   * Test deleting snapshots in more complicated scenarios (snapshot diffs are
+   * distributed in the directory sub-tree)
+   */
+  @Test
+  public void testDeleteSnapshot2() throws Exception {
+    testDeleteSnapshot(sub1, "subsub1/subsubsub1/");
+  }
+
+  /**
+   * Test snapshot deletion
+   * @param snapshotRoot The dir where the snapshots are created
+   * @param modDirStr The snapshotRoot itself or one of its sub-directory,
+   *          where the modifications happen. It is represented as a relative
+   *          path to the snapshotRoot.
+   */
+  private void testDeleteSnapshot(Path snapshotRoot, String modDirStr)
+      throws Exception {
+    Path modDir = modDirStr.isEmpty() ? snapshotRoot : new Path(snapshotRoot,
+        modDirStr);
+    Path file10 = new Path(modDir, "file10");
+    Path file11 = new Path(modDir, "file11");
+    Path file12 = new Path(modDir, "file12");
+    Path file13 = new Path(modDir, "file13");
+    Path file14 = new Path(modDir, "file14");
+    Path file15 = new Path(modDir, "file15");
+    DFSTestUtil.createFile(hdfs, file10, BLOCKSIZE, (short) (REPLICATION - 1),
+        seed);
+    DFSTestUtil.createFile(hdfs, file11, BLOCKSIZE, (short) (REPLICATION - 1),
+        seed);
+    DFSTestUtil.createFile(hdfs, file12, BLOCKSIZE, (short) (REPLICATION - 1),
+        seed);
+    DFSTestUtil.createFile(hdfs, file13, BLOCKSIZE, (short) (REPLICATION - 1),
+        seed);
+    // create snapshot s1 for snapshotRoot
+    hdfs.allowSnapshot(snapshotRoot.toString());
+    hdfs.createSnapshot(snapshotRoot, "s1");
+
+    // delete file11
+    hdfs.delete(file11, true);
+    // modify file12
+    hdfs.setReplication(file12, REPLICATION);
+    // modify file13
+    hdfs.setReplication(file13, REPLICATION);
+    // create file14
+    DFSTestUtil.createFile(hdfs, file14, BLOCKSIZE, REPLICATION, seed);
+    // create file15
+    DFSTestUtil.createFile(hdfs, file15, BLOCKSIZE, REPLICATION, seed);
+
+    // create snapshot s2 for snapshotRoot
+    hdfs.createSnapshot(snapshotRoot, "s2");
+
+    // create file11 again: (0, d) + (c, 0)
+    DFSTestUtil.createFile(hdfs, file11, BLOCKSIZE, REPLICATION, seed);
+    // delete file12: (c, d) + (0, d)
+    hdfs.delete(file12, true);
+    // modify file13: (c, d) + (c, d)
+    hdfs.setReplication(file13, (short) (REPLICATION - 2));
+    // delete file14: (c, 0) + (0, d)
+    hdfs.delete(file14, true);
+    // modify file15: (c, 0) + (c, d)
+    hdfs.setReplication(file15, (short) (REPLICATION - 1));
+
+    // create snapshot s3 for snapshotRoot
+    hdfs.createSnapshot(snapshotRoot, "s3");
+    // modify file10, to check if the posterior diff was set correctly
+    hdfs.setReplication(file10, (short) (REPLICATION - 1));
+
+    Path file10_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1",
+        modDirStr + "file10");
+    Path file11_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1",
+        modDirStr + "file11");
+    Path file12_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1",
+        modDirStr + "file12");
+    Path file13_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1",
+        modDirStr + "file13");
+    FileStatus statusBeforeDeletion10 = hdfs.getFileStatus(file10_s1);
+    FileStatus statusBeforeDeletion11 = hdfs.getFileStatus(file11_s1);
+    FileStatus statusBeforeDeletion12 = hdfs.getFileStatus(file12_s1);
+    FileStatus statusBeforeDeletion13 = hdfs.getFileStatus(file13_s1);
+
+    // delete s2, in which process we need to combine the diff in s2 to s1
+    hdfs.deleteSnapshot(snapshotRoot, "s2");
+    // check the correctness of s1
+    FileStatus statusAfterDeletion10 = hdfs.getFileStatus(file10_s1);
+    FileStatus statusAfterDeletion11 = hdfs.getFileStatus(file11_s1);
+    FileStatus statusAfterDeletion12 = hdfs.getFileStatus(file12_s1);
+    FileStatus statusAfterDeletion13 = hdfs.getFileStatus(file13_s1);
+    assertEquals(statusBeforeDeletion10.toString(),
+        statusAfterDeletion10.toString());
+    assertEquals(statusBeforeDeletion11.toString(),
+        statusAfterDeletion11.toString());
+    assertEquals(statusBeforeDeletion12.toString(),
+        statusAfterDeletion12.toString());
+    assertEquals(statusBeforeDeletion13.toString(),
+        statusAfterDeletion13.toString());
+
+    // make sure file14 and file15 are not included in s1
+    Path file14_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1",
+        modDirStr + "file14");
+    assertFalse(hdfs.exists(file14_s1));
+    Path file15_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1",
+        modDirStr + "file15");
+    assertFalse(hdfs.exists(file15_s1));
+
+    // call INodeFileWithLink#getBlockReplication, check the correctness of the
+    // circular list after snapshot deletion
+    INodeFile nodeFile13 = INodeFile.valueOf(
+        fsn.getFSDirectory().getINode(file13.toString()), file13.toString());
+    short blockReplicationFile13 = nodeFile13.getBlockReplication();
+    assertEquals(REPLICATION - 1, blockReplicationFile13);
+    INodeFile nodeFile12 = INodeFile.valueOf(
+        fsn.getFSDirectory().getINode(file12_s1.toString()),
+        file12_s1.toString());
+    short blockReplicationFile12 = nodeFile12.getBlockReplication();
+    assertEquals(REPLICATION - 1, blockReplicationFile12);
+  }
+
 }

@@ -110,7 +110,7 @@ public class TestSnapshotRename {
     SnapshotTestHelper.createSnapshot(hdfs, sub1, "s3");

     // Rename s3 to s22
-    hdfs.renameSnapshot(sub1.toString(), "s3", "s22");
+    hdfs.renameSnapshot(sub1, "s3", "s22");
     // Check the snapshots list
     INodeDirectorySnapshottable srcRoot = INodeDirectorySnapshottable.valueOf(
         fsdir.getINode(sub1.toString()), sub1.toString());
@@ -118,12 +118,12 @@ public class TestSnapshotRename {
         new String[] { "s1", "s2", "s22" });

     // Rename s1 to s4
-    hdfs.renameSnapshot(sub1.toString(), "s1", "s4");
+    hdfs.renameSnapshot(sub1, "s1", "s4");
     checkSnapshotList(srcRoot, new String[] { "s2", "s22", "s4" },
         new String[] { "s4", "s2", "s22" });

     // Rename s22 to s0
-    hdfs.renameSnapshot(sub1.toString(), "s22", "s0");
+    hdfs.renameSnapshot(sub1, "s22", "s0");
     checkSnapshotList(srcRoot, new String[] { "s0", "s2", "s4" },
         new String[] { "s4", "s2", "s0" });
   }
@@ -141,7 +141,7 @@ public class TestSnapshotRename {
     FileStatus statusBeforeRename = hdfs.getFileStatus(ssPath);

     // Rename the snapshot
-    hdfs.renameSnapshot(sub1.toString(), "s1", "s2");
+    hdfs.renameSnapshot(sub1, "s1", "s2");
     // <sub1>/.snapshot/s1/file1 should no longer exist
     assertFalse(hdfs.exists(ssPath));
     snapshotRoot = SnapshotTestHelper.getSnapshotRoot(sub1, "s2");
@@ -170,7 +170,7 @@ public class TestSnapshotRename {
     String error = "The snapshot wrongName does not exist for directory "
         + sub1.toString();
     exception.expectMessage(error);
-    hdfs.renameSnapshot(sub1.toString(), "wrongName", "s2");
+    hdfs.renameSnapshot(sub1, "wrongName", "s2");
   }

   /**
@@ -187,6 +187,6 @@ public class TestSnapshotRename {
     String error = "The snapshot s2 already exists for directory "
        + sub1.toString();
     exception.expectMessage(error);
-    hdfs.renameSnapshot(sub1.toString(), "s1", "s2");
+    hdfs.renameSnapshot(sub1, "s1", "s2");
   }
 }

@@ -107,8 +107,8 @@ public class TestSnapshottableDirListing {
     // Make dir2 snapshottable again
     hdfs.allowSnapshot(dir2.toString());
     // Create a snapshot for dir2
-    hdfs.createSnapshot("s1", dir2.toString());
-    hdfs.createSnapshot("s2", dir2.toString());
+    hdfs.createSnapshot(dir2, "s1");
+    hdfs.createSnapshot(dir2, "s2");
     dirs = hdfs.getSnapshottableDirListing();
     // There are now 2 snapshots for dir2
     assertEquals(dir2, dirs[1].getFullPath());