HDFS-4196. Support renaming of snapshots. Contributed by Jing Zhao

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1410986 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Tsz-wo Sze 2012-11-18 20:44:10 +00:00
parent b5a2dd19c4
commit 9bed64a6fc
18 changed files with 529 additions and 17 deletions

View File

@ -76,3 +76,5 @@ Branch-2802 Snapshot (Unreleased)
HDFS-4187. Add tests for replication handling in snapshots. (Jing Zhao via
szetszwo)
HDFS-4196. Support renaming of snapshots. (Jing Zhao via szetszwo)

View File

@ -1897,8 +1897,23 @@ public boolean setSafeMode(SafeModeAction action) throws IOException {
*/
public void createSnapshot(String snapshotName, String snapshotRoot)
throws IOException {
checkOpen();
namenode.createSnapshot(snapshotName, snapshotRoot);
}
/**
* Rename a snapshot.
* @param snapshotDir The directory path where the snapshot was taken
* @param snapshotOldName Old name of the snapshot
* @param snapshotNewName New name of the snapshot
* @throws IOException
* @see ClientProtocol#renameSnapshot(String, String, String)
*/
public void renameSnapshot(String snapshotDir, String snapshotOldName,
String snapshotNewName) throws IOException {
checkOpen();
namenode.renameSnapshot(snapshotDir, snapshotOldName, snapshotNewName);
}
/**
* Allow snapshot on a directory.
@ -1906,6 +1921,7 @@ public void createSnapshot(String snapshotName, String snapshotRoot)
* @see ClientProtocol#allowSnapshot(String snapshotRoot)
*/
public void allowSnapshot(String snapshotRoot) throws IOException {
checkOpen();
namenode.allowSnapshot(snapshotRoot);
}
@ -1915,6 +1931,7 @@ public void allowSnapshot(String snapshotRoot) throws IOException {
* @see ClientProtocol#disallowSnapshot(String snapshotRoot)
*/
public void disallowSnapshot(String snapshotRoot) throws IOException {
checkOpen();
namenode.disallowSnapshot(snapshotRoot);
}

View File

@ -912,4 +912,16 @@ public void createSnapshot(String snapshotName, String path)
throws IOException {
dfs.createSnapshot(snapshotName, path);
}
/**
* Rename a snapshot
* @param path The directory path where the snapshot was taken
* @param snapshotOldName Old name of the snapshot
* @param snapshotNewName New name of the snapshot
* @throws IOException
*/
public void renameSnapshot(String path, String snapshotOldName,
String snapshotNewName) throws IOException {
dfs.renameSnapshot(path, snapshotOldName, snapshotNewName);
}
}
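As a usage illustration only (not part of this patch), the sketch below exercises the new DistributedFileSystem.renameSnapshot call end to end. The class name, the /foo path, and the single-datanode MiniDFSCluster are assumptions for the example, and allowSnapshot is reached through DFSClient because this diff does not show a FileSystem-level wrapper for it. Note the argument order on this branch: createSnapshot takes the snapshot name first, while renameSnapshot takes the directory path first.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class RenameSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DistributedFileSystem dfs = cluster.getFileSystem();
      Path dir = new Path("/foo");
      dfs.mkdirs(dir);
      // The directory must be snapshottable before a snapshot can be taken.
      dfs.getClient().allowSnapshot(dir.toString());
      dfs.createSnapshot("s0", dir.toString());
      // Rename snapshot s0 to s1: /foo/.snapshot/s0 goes away and
      // /foo/.snapshot/s1 now refers to the same saved state.
      dfs.renameSnapshot(dir.toString(), "s0", "s1");
      System.out.println(dfs.exists(new Path(dir, ".snapshot/s1"))); // expected: true
    } finally {
      cluster.shutdown();
    }
  }
}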

View File

@ -968,10 +968,21 @@ public void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
* Create a snapshot
* @param snapshotName name of the snapshot created
* @param snapshotRoot the path that is being snapshotted
* @throws IOException
*/
public void createSnapshot(String snapshotName, String snapshotRoot)
throws IOException;
/**
* Rename a snapshot
* @param snapshotRoot the directory path where the snapshot was taken
* @param snapshotOldName old name of the snapshot
* @param snapshotNewName new name of the snapshot
* @throws IOException
*/
public void renameSnapshot(String snapshotRoot, String snapshotOldName,
String snapshotNewName) throws IOException;
/**
* Allow snapshot on a directory.
* @param snapshotRoot the directory to be snapped

View File

@ -101,6 +101,8 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto;
@ -156,6 +158,8 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
final private ClientProtocol server;
static final CreateSnapshotResponseProto VOID_CREATE_SNAPSHOT_RESPONSE =
CreateSnapshotResponseProto.newBuilder().build();
static final RenameSnapshotResponseProto VOID_RENAME_SNAPSHOT_RESPONSE =
RenameSnapshotResponseProto.newBuilder().build();
static final AllowSnapshotResponseProto VOID_ALLOW_SNAPSHOT_RESPONSE =
AllowSnapshotResponseProto.newBuilder().build();
static final DisallowSnapshotResponseProto VOID_DISALLOW_SNAPSHOT_RESPONSE =
@ -889,4 +893,16 @@ public DisallowSnapshotResponseProto disallowSnapshot(RpcController controller,
throw new ServiceException(e);
}
}
@Override
public RenameSnapshotResponseProto renameSnapshot(RpcController controller,
RenameSnapshotRequestProto request) throws ServiceException {
try {
server.renameSnapshot(request.getSnapshotRoot(),
request.getSnapshotOldName(), request.getSnapshotNewName());
return VOID_RENAME_SNAPSHOT_RESPONSE;
} catch (IOException e) {
throw new ServiceException(e);
}
}
}

View File

@ -86,6 +86,7 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto;
@ -864,4 +865,17 @@ public void disallowSnapshot(String snapshotRoot) throws IOException {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public void renameSnapshot(String snapshotRoot, String snapshotOldName,
String snapshotNewName) throws IOException {
RenameSnapshotRequestProto req = RenameSnapshotRequestProto.newBuilder()
.setSnapshotRoot(snapshotRoot).setSnapshotOldName(snapshotOldName)
.setSnapshotNewName(snapshotNewName).build();
try {
rpcProxy.renameSnapshot(null, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
}

View File

@ -59,6 +59,7 @@
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp;
@ -879,6 +880,13 @@ void logDeleteSnapshot(String snapName, String snapRoot) {
logEdit(op);
}
void logRenameSnapshot(String path, String snapOldName, String snapNewName) {
RenameSnapshotOp op = RenameSnapshotOp.getInstance(cache.get())
.setSnapshotRoot(path).setSnapshotOldName(snapOldName)
.setSnapshotNewName(snapNewName);
logEdit(op);
}
void logAllowSnapshot(String path) {
AllowSnapshotOp op = AllowSnapshotOp.getInstance(cache.get())
.setSnapshotRoot(path);

View File

@ -119,6 +119,7 @@ public OpInstanceCache() {
inst.put(OP_DISALLOW_SNAPSHOT, new DisallowSnapshotOp());
inst.put(OP_CREATE_SNAPSHOT, new CreateSnapshotOp());
inst.put(OP_DELETE_SNAPSHOT, new DeleteSnapshotOp());
inst.put(OP_RENAME_SNAPSHOT, new RenameSnapshotOp());
}
public FSEditLogOp get(FSEditLogOpCodes opcode) {
@ -2286,6 +2287,79 @@ public String toString() {
return builder.toString();
}
}
/**
* Operation corresponding to renaming a snapshot
*/
static class RenameSnapshotOp extends FSEditLogOp {
String snapshotRoot;
String snapshotOldName;
String snapshotNewName;
RenameSnapshotOp() {
super(OP_RENAME_SNAPSHOT);
}
static RenameSnapshotOp getInstance(OpInstanceCache cache) {
return (RenameSnapshotOp) cache.get(OP_RENAME_SNAPSHOT);
}
RenameSnapshotOp setSnapshotOldName(String snapshotOldName) {
this.snapshotOldName = snapshotOldName;
return this;
}
RenameSnapshotOp setSnapshotNewName(String snapshotNewName) {
this.snapshotNewName = snapshotNewName;
return this;
}
RenameSnapshotOp setSnapshotRoot(String snapshotRoot) {
this.snapshotRoot = snapshotRoot;
return this;
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
snapshotRoot = FSImageSerialization.readString(in);
snapshotOldName = FSImageSerialization.readString(in);
snapshotNewName = FSImageSerialization.readString(in);
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(snapshotRoot, out);
FSImageSerialization.writeString(snapshotOldName, out);
FSImageSerialization.writeString(snapshotNewName, out);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "SNAPSHOTROOT", snapshotRoot);
XMLUtils.addSaxString(contentHandler, "SNAPSHOTOLDNAME", snapshotOldName);
XMLUtils.addSaxString(contentHandler, "SNAPSHOTNEWNAME", snapshotNewName);
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
snapshotRoot = st.getValue("SNAPSHOTROOT");
snapshotOldName = st.getValue("SNAPSHOTOLDNAME");
snapshotNewName = st.getValue("SNAPSHOTNEWNAME");
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("RenameSnapshotOp [snapshotRoot=");
builder.append(snapshotRoot);
builder.append(", snapshotOldName=");
builder.append(snapshotOldName);
builder.append(", snapshotNewName=");
builder.append(snapshotNewName);
builder.append("]");
return builder.toString();
}
}
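As a sanity sketch of the serialization added above, the following hypothetical snippet round-trips a RenameSnapshotOp through writeFields/readFields and prints it. The class name and the standalone main are assumptions; it has to live in the org.apache.hadoop.hdfs.server.namenode package because the op classes and readFields are package-private, and the logVersion argument does not matter here since this op only reads three strings.

package org.apache.hadoop.hdfs.server.namenode;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

public class RenameSnapshotOpRoundTrip {
  public static void main(String[] args) throws Exception {
    FSEditLogOp.RenameSnapshotOp op = FSEditLogOp.RenameSnapshotOp
        .getInstance(new FSEditLogOp.OpInstanceCache())
        .setSnapshotRoot("/foo")
        .setSnapshotOldName("s0")
        .setSnapshotNewName("s1");

    // Serialize the three string fields exactly as the edit log would.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    op.writeFields(new DataOutputStream(bytes));

    // Read them back into a fresh op instance (a separate cache on purpose,
    // since a cache hands out one shared instance per opcode).
    FSEditLogOp.RenameSnapshotOp copy = FSEditLogOp.RenameSnapshotOp
        .getInstance(new FSEditLogOp.OpInstanceCache());
    copy.readFields(
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())), -1);

    // Prints: RenameSnapshotOp [snapshotRoot=/foo, snapshotOldName=s0, snapshotNewName=s1]
    System.out.println(copy);
  }
}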
/**
* Operation corresponding to allowing snapshot creation on a directory

View File

@ -59,8 +59,9 @@ public enum FSEditLogOpCodes {
OP_UPDATE_BLOCKS ((byte) 25),
OP_CREATE_SNAPSHOT ((byte) 26),
OP_DELETE_SNAPSHOT ((byte) 27),
-OP_ALLOW_SNAPSHOT ((byte) 28),
-OP_DISALLOW_SNAPSHOT ((byte) 29);
+OP_RENAME_SNAPSHOT ((byte) 28),
+OP_ALLOW_SNAPSHOT ((byte) 29),
+OP_DISALLOW_SNAPSHOT ((byte) 30);
private byte opCode;

View File

@ -5645,19 +5645,59 @@ public void createSnapshot(String snapshotName, String path)
dir.writeLock();
try {
snapshotManager.createSnapshot(snapshotName, path);
getEditLog().logCreateSnapshot(snapshotName, path);
} finally {
dir.writeUnlock();
}
getEditLog().logCreateSnapshot(snapshotName, path);
} finally {
writeUnlock();
}
getEditLog().logSync();
if (auditLog.isInfoEnabled() && isExternalInvocation()) {
Path snapshotRoot = new Path(path, ".snapshot/" + snapshotName);
Path snapshotRoot = new Path(path, HdfsConstants.DOT_SNAPSHOT_DIR + "/"
+ snapshotName);
logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(),
"createSnapshot", path, snapshotRoot.toString(), null);
}
}
/**
* Rename a snapshot
* @param path The directory path where the snapshot was taken
* @param snapshotOldName Old snapshot name
* @param snapshotNewName New snapshot name
* @throws SafeModeException
* @throws IOException
*/
public void renameSnapshot(String path, String snapshotOldName,
String snapshotNewName) throws SafeModeException, IOException {
writeLock();
try {
checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) {
throw new SafeModeException("Cannot rename snapshot for " + path,
safeMode);
}
checkOwner(path);
// TODO: check if the new name is valid. May also need this for
// createSnapshot
snapshotManager.renameSnapshot(path, snapshotOldName, snapshotNewName);
getEditLog().logRenameSnapshot(path, snapshotOldName, snapshotNewName);
} finally {
writeUnlock();
}
getEditLog().logSync();
if (auditLog.isInfoEnabled() && isExternalInvocation()) {
Path oldSnapshotRoot = new Path(path, HdfsConstants.DOT_SNAPSHOT_DIR
+ "/" + snapshotOldName);
Path newSnapshotRoot = new Path(path, HdfsConstants.DOT_SNAPSHOT_DIR
+ "/" + snapshotNewName);
logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(),
"renameSnapshot", oldSnapshotRoot.toString(),
newSnapshotRoot.toString(), null);
}
}
}

View File

@ -262,7 +262,7 @@ public byte[] getLocalNameBytes() {
/**
* Set local file name
*/
protected void setLocalName(String name) {
public void setLocalName(String name) {
this.name = DFSUtil.string2Bytes(name);
}

View File

@ -1098,4 +1098,11 @@ public void disallowSnapshot(String snapshot) throws IOException {
metrics.incrDisAllowSnapshotOps();
namesystem.disallowSnapshot(snapshot);
}
@Override
public void renameSnapshot(String snapshotRoot, String snapshotOldName,
String snapshotNewName) throws IOException {
metrics.incrRenameSnapshotOps();
namesystem.renameSnapshot(snapshotRoot, snapshotOldName, snapshotNewName);
}
}

View File

@ -63,6 +63,8 @@ public class NameNodeMetrics {
MutableCounterLong disallowSnapshotOps;
@Metric("Number of createSnapshot operations")
MutableCounterLong createSnapshotOps;
@Metric("Number of renameSnapshot operations")
MutableCounterLong renameSnapshotOps;
@Metric("Journal transactions") MutableRate transactions;
@Metric("Journal syncs") MutableRate syncs;
@ -177,6 +179,10 @@ public void incrCreateSnapshotOps() {
createSnapshotOps.incr();
}
public void incrRenameSnapshotOps() {
renameSnapshotOps.incr();
}
public void addTransaction(long latency) {
transactions.add(latency);
}

View File

@ -26,11 +26,14 @@
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
/**
* Directories where taking snapshots is allowed.
*
@ -67,7 +70,23 @@ static public INodeDirectorySnapshottable valueOf(
private final List<Snapshot> snapshots = new ArrayList<Snapshot>();
/** Snapshots of this directory in ascending order of snapshot names. */
private final List<Snapshot> snapshotsByNames = new ArrayList<Snapshot>();
/**
* @return {@link #snapshots}
*/
@VisibleForTesting
List<Snapshot> getSnapshots() {
return snapshots;
}
/**
* @return {@link #snapshotsByNames}
*/
@VisibleForTesting
List<Snapshot> getSnapshotsByNames() {
return snapshotsByNames;
}
/** Number of snapshots allowed. */
private int snapshotQuota;
@ -90,6 +109,48 @@ public Snapshot getSnapshot(byte[] snapshotName) {
final int i = searchSnapshot(snapshotName);
return i < 0? null: snapshotsByNames.get(i);
}
/**
* Rename a snapshot
* @param path
* The directory path where the snapshot was taken. Used for
* generating exception message.
* @param oldName
* Old name of the snapshot
* @param newName
* New name the snapshot will be renamed to
* @throws SnapshotException
* Throw SnapshotException when either the snapshot with the old
* name does not exist or a snapshot with the new name already
* exists
*/
public void renameSnapshot(String path, String oldName, String newName)
throws SnapshotException {
if (newName.equals(oldName)) {
return;
}
final int indexOfOld = searchSnapshot(DFSUtil.string2Bytes(oldName));
if (indexOfOld < 0) {
throw new SnapshotException("The snapshot " + oldName
+ " does not exist for directory " + path);
} else {
int indexOfNew = searchSnapshot(DFSUtil.string2Bytes(newName));
if (indexOfNew >= 0) {
throw new SnapshotException("The snapshot " + newName
+ " already exists for directory " + path);
}
// remove the one with old name from snapshotsByNames
Snapshot snapshot = snapshotsByNames.remove(indexOfOld);
INodeDirectoryWithSnapshot ssRoot = snapshot.getRoot();
ssRoot.setLocalName(newName);
indexOfNew = -indexOfNew - 1;
if (indexOfNew <= indexOfOld) {
snapshotsByNames.add(indexOfNew, snapshot);
} else { // indexOfNew > indexOfOld
snapshotsByNames.add(indexOfNew - 1, snapshot);
}
}
}
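The index handling above follows the Collections.binarySearch convention: searchSnapshot returns the index of a match, or -(insertionPoint) - 1 when the name is absent, and because the old entry is removed before the renamed one is re-inserted, the insertion point has to be shifted down by one whenever it falls after the removed slot. Below is a standalone sketch of the same bookkeeping on a plain sorted list of strings; the class and snapshot names are made up, and String.compareTo stands in for the byte-array comparison used by searchSnapshot (the two agree for ASCII names).

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class SortedRenameSketch {
  /** Rename oldName to newName while keeping the list sorted. */
  static void rename(List<String> sorted, String oldName, String newName) {
    if (newName.equals(oldName)) {
      return;
    }
    final int indexOfOld = Collections.binarySearch(sorted, oldName);
    if (indexOfOld < 0) {
      throw new IllegalArgumentException(oldName + " does not exist");
    }
    int indexOfNew = Collections.binarySearch(sorted, newName);
    if (indexOfNew >= 0) {
      throw new IllegalArgumentException(newName + " already exists");
    }
    sorted.remove(indexOfOld);
    indexOfNew = -indexOfNew - 1;          // insertion point in the original list
    if (indexOfNew <= indexOfOld) {
      sorted.add(indexOfNew, newName);     // slots before the removed one are unaffected
    } else {
      sorted.add(indexOfNew - 1, newName); // the removal shifted later slots left by one
    }
  }

  public static void main(String[] args) {
    List<String> names = new ArrayList<String>(Arrays.asList("s1", "s2", "s3"));
    rename(names, "s3", "s22");  // [s1, s2, s22]
    rename(names, "s1", "s4");   // [s2, s22, s4]
    System.out.println(names);
  }
}

The two calls in main mirror the first renames exercised by testSnapshotList in the new TestSnapshotRename below.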
/** @return the last snapshot. */
public Snapshot getLastSnapshot() {

View File

@ -31,7 +31,18 @@
import org.apache.hadoop.hdfs.server.namenode.INodeSymlink;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
/** Manage snapshottable directories and their snapshots. */
/**
* Manage snapshottable directories and their snapshots.
*
* This class includes operations that create, access, and modify snapshots and/or
* snapshot-related data. In general, the locking structure of snapshot
* operations is: <br>
*
* 1. Lock the {@link FSNamesystem} lock in {@link FSNamesystem} before calling
* into {@link SnapshotManager} methods.<br>
* 2. Lock the {@link FSDirectory} lock for the {@link SnapshotManager} methods
* if necessary.
*/
public class SnapshotManager implements SnapshotStats {
private final FSNamesystem namesystem;
private final FSDirectory fsdir;
@ -95,25 +106,53 @@ public void resetSnapshottable(final String path
/**
* Create a snapshot of the given path.
*
* @param snapshotName The name of the snapshot.
* @param path The directory path where the snapshot will be taken.
* @param snapshotName
* The name of the snapshot.
* @param path
* The directory path where the snapshot will be taken.
* @throws IOException
* Throw IOException when 1) the given path does not lead to an
* existing snapshottable directory, and/or 2) there exists a
* snapshot with the given name for the directory, and/or 3)
* snapshot number exceeds quota
*/
public void createSnapshot(final String snapshotName, final String path
) throws IOException {
// Find the source root directory path where the snapshot is taken.
final INodeDirectorySnapshottable srcRoot
= INodeDirectorySnapshottable.valueOf(fsdir.getINode(path), path);
synchronized(this) {
final Snapshot s = srcRoot.addSnapshot(snapshotID, snapshotName);
new SnapshotCreation().processRecursively(srcRoot, s.getRoot());
final Snapshot s = srcRoot.addSnapshot(snapshotID, snapshotName);
new SnapshotCreation().processRecursively(srcRoot, s.getRoot());
//create success, update id
snapshotID++;
}
//create success, update id
snapshotID++;
numSnapshots.getAndIncrement();
}
/**
* Rename the given snapshot
* @param path
* The directory path where the snapshot was taken
* @param oldSnapshotName
* Old name of the snapshot
* @param newSnapshotName
* New name of the snapshot
* @throws IOException
* Throw IOException when 1) the given path does not lead to an
* existing snapshottable directory, and/or 2) the snapshot with the
* old name does not exist for the directory, and/or 3) there exists
* a snapshot with the new name for the directory
*/
public void renameSnapshot(final String path, final String oldSnapshotName,
final String newSnapshotName) throws IOException {
// Find the source root directory path where the snapshot was taken.
// All the checks for the path are done inside the valueOf method.
final INodeDirectorySnapshottable srcRoot
= INodeDirectorySnapshottable.valueOf(fsdir.getINode(path), path);
// Note that renameSnapshot and createSnapshot are synchronized externally
// through FSNamesystem's write lock
srcRoot.renameSnapshot(path, oldSnapshotName, newSnapshotName);
}
/**
* Create a snapshot of subtrees by recursively copying the directory

View File

@ -451,6 +451,15 @@ message CreateSnapshotRequestProto {
message CreateSnapshotResponseProto { // void response
}
message RenameSnapshotRequestProto {
required string snapshotRoot = 1;
required string snapshotOldName = 2;
required string snapshotNewName = 3;
}
message RenameSnapshotResponseProto { // void response
}
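For reference, a hypothetical snippet using the generated protobuf-java API for the new request message (the /foo path and snapshot names are placeholders); since all three fields are declared required, build() fails fast if any of them is unset:

import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto;

public class BuildRenameSnapshotRequest {
  public static void main(String[] args) {
    RenameSnapshotRequestProto req = RenameSnapshotRequestProto.newBuilder()
        .setSnapshotRoot("/foo")
        .setSnapshotOldName("s0")
        .setSnapshotNewName("s1")
        .build(); // throws UninitializedMessageException if a required field is missing
    System.out.println(req.getSnapshotRoot() + ": "
        + req.getSnapshotOldName() + " -> " + req.getSnapshotNewName());
  }
}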
message AllowSnapshotRequestProto {
required string snapshotRoot = 1;
}
@ -540,6 +549,8 @@ service ClientNamenodeProtocol {
returns(GetDataEncryptionKeyResponseProto);
rpc createSnapshot(CreateSnapshotRequestProto)
returns(CreateSnapshotResponseProto);
rpc renameSnapshot(RenameSnapshotRequestProto)
returns(RenameSnapshotResponseProto);
rpc allowSnapshot(AllowSnapshotRequestProto)
returns(AllowSnapshotResponseProto);
rpc disallowSnapshot(DisallowSnapshotRequestProto)

View File

@ -23,6 +23,7 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
/**
* Helper for writing snapshot related tests
@ -33,7 +34,8 @@ private SnapshotTestHelper() {
}
public static Path getSnapshotRoot(Path snapshottedDir, String snapshotName) {
return new Path(snapshottedDir, ".snapshot/" + snapshotName);
return new Path(snapshottedDir, HdfsConstants.DOT_SNAPSHOT_DIR + "/"
+ snapshotName);
}
public static Path getSnapshotPath(Path snapshottedDir, String snapshotName,

View File

@ -0,0 +1,191 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.ipc.RemoteException;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
/**
* Tests for renaming snapshots
*/
public class TestSnapshotRename {
static final long seed = 0;
static final short REPLICATION = 3;
static final long BLOCKSIZE = 1024;
private final Path dir = new Path("/TestSnapshot");
private final Path sub1 = new Path(dir, "sub1");
private final Path file1 = new Path(sub1, "file1");
Configuration conf;
MiniDFSCluster cluster;
FSNamesystem fsn;
DistributedFileSystem hdfs;
FSDirectory fsdir;
@Before
public void setUp() throws Exception {
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
fsdir = fsn.getFSDirectory();
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
@Rule
public ExpectedException exception = ExpectedException.none();
/**
* Check the correctness of snapshot list within
* {@link INodeDirectorySnapshottable}
*/
private void checkSnapshotList(INodeDirectorySnapshottable srcRoot,
String[] sortedNames, String[] names) {
List<Snapshot> listByName = srcRoot.getSnapshotsByNames();
assertEquals(sortedNames.length, listByName.size());
for (int i = 0; i < listByName.size(); i++) {
assertEquals(sortedNames[i], listByName.get(i).getRoot().getLocalName());
}
List<Snapshot> listByTime = srcRoot.getSnapshots();
assertEquals(names.length, listByTime.size());
for (int i = 0; i < listByTime.size(); i++) {
assertEquals(names[i], listByTime.get(i).getRoot().getLocalName());
}
}
/**
* Rename snapshot(s), and check the correctness of the snapshot list within
* {@link INodeDirectorySnapshottable}
*/
@Test
public void testSnapshotList() throws Exception {
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
// Create three snapshots for sub1
SnapshotTestHelper.createSnapshot(hdfs, sub1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, sub1, "s2");
SnapshotTestHelper.createSnapshot(hdfs, sub1, "s3");
// Rename s3 to s22
hdfs.renameSnapshot(sub1.toString(), "s3", "s22");
// Check the snapshots list
INodeDirectorySnapshottable srcRoot = INodeDirectorySnapshottable.valueOf(
fsdir.getINode(sub1.toString()), sub1.toString());
checkSnapshotList(srcRoot, new String[] { "s1", "s2", "s22" },
new String[] { "s1", "s2", "s22" });
// Rename s1 to s4
hdfs.renameSnapshot(sub1.toString(), "s1", "s4");
checkSnapshotList(srcRoot, new String[] { "s2", "s22", "s4" },
new String[] { "s4", "s2", "s22" });
// Rename s22 to s0
hdfs.renameSnapshot(sub1.toString(), "s22", "s0");
checkSnapshotList(srcRoot, new String[] { "s0", "s2", "s4" },
new String[] { "s4", "s2", "s0" });
}
/**
* Test FileStatus of snapshot file before/after rename
*/
@Test
public void testSnapshotRename() throws Exception {
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
// Create snapshot for sub1
Path snapshotRoot = SnapshotTestHelper.createSnapshot(hdfs, sub1, "s1");
Path ssPath = new Path(snapshotRoot, file1.getName());
assertTrue(hdfs.exists(ssPath));
FileStatus statusBeforeRename = hdfs.getFileStatus(ssPath);
// Rename the snapshot
hdfs.renameSnapshot(sub1.toString(), "s1", "s2");
// <sub1>/.snapshot/s1/file1 should no longer exist
assertFalse(hdfs.exists(ssPath));
snapshotRoot = SnapshotTestHelper.getSnapshotRoot(sub1, "s2");
ssPath = new Path(snapshotRoot, file1.getName());
// Instead, <sub1>/.snapshot/s2/file1 should exist
assertTrue(hdfs.exists(ssPath));
FileStatus statusAfterRename = hdfs.getFileStatus(ssPath);
// FileStatus of the snapshot should not change except the path
assertFalse(statusBeforeRename.equals(statusAfterRename));
statusBeforeRename.setPath(statusAfterRename.getPath());
assertEquals(statusBeforeRename.toString(), statusAfterRename.toString());
}
/**
* Test rename a non-existing snapshot
*/
@Test
public void testRenameNonExistingSnapshot() throws Exception {
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
// Create snapshot for sub1
SnapshotTestHelper.createSnapshot(hdfs, sub1, "s1");
exception.expect(RemoteException.class);
String error = "The snapshot wrongName does not exist for directory "
+ sub1.toString();
exception.expectMessage(error);
hdfs.renameSnapshot(sub1.toString(), "wrongName", "s2");
}
/**
* Test rename a snapshot to another existing snapshot
*/
@Test
public void testRenameToExistingSnapshot() throws Exception {
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
// Create snapshots for sub1
SnapshotTestHelper.createSnapshot(hdfs, sub1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, sub1, "s2");
exception.expect(RemoteException.class);
String error = "The snapshot s2 already exists for directory "
+ sub1.toString();
exception.expectMessage(error);
hdfs.renameSnapshot(sub1.toString(), "s1", "s2");
}
}