HDFS-4149. Implement the disallowSnapshot(..) in FSNamesystem and add resetSnapshottable(..) to SnapshotManager.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1405683 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Tsz-wo Sze 2012-11-05 00:40:54 +00:00
parent deaf979d41
commit f84000900a
10 changed files with 213 additions and 104 deletions

View File

@ -2222,9 +2222,9 @@ public abstract class FileSystem extends Configured implements Closeable {
/** /**
* Create a snapshot * Create a snapshot
* @param snapshotName The name of the snapshot * @param snapshotName The name of the snapshot
* @param snapshotRoot The directory where the snapshot will be taken * @param path The directory where snapshots will be taken.
*/ */
public void createSnapshot(String snapshotName, String snapshotRoot) public void createSnapshot(String snapshotName, String path)
throws IOException { throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName() throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support createSnapshot"); + " doesn't support createSnapshot");

View File

@ -50,3 +50,6 @@ Branch-2802 Snapshot (Unreleased)
HDFS-4146. Use getter and setter in INodeFileWithLink to access blocks and HDFS-4146. Use getter and setter in INodeFileWithLink to access blocks and
initialize root directory as snapshottable. (szetszwo) initialize root directory as snapshottable. (szetszwo)
HDFS-4149. Implement the disallowSnapshot(..) in FSNamesystem and add
resetSnapshottable(..) to SnapshotManager. (szetszwo)

View File

@ -872,28 +872,26 @@ public class DistributedFileSystem extends FileSystem {
/** /**
* Allow snapshot on a directory. * Allow snapshot on a directory.
* *
* @param snapshotRoot the directory to be snapped * @param path the directory where snapshots will be taken
* @throws IOException * @throws IOException
*/ */
public void allowSnapshot(String snapshotRoot) public void allowSnapshot(String path) throws IOException {
throws IOException { dfs.allowSnapshot(path);
dfs.allowSnapshot(snapshotRoot);
} }
/** /**
* Disallow snapshot on a directory. * Disallow snapshot on a directory.
* *
* @param snapshotRoot the directory to be snapped * @param path the snapshottable directory.
* @throws IOException * @throws IOException
*/ */
public void disallowSnapshot(String snapshotRoot) public void disallowSnapshot(String path) throws IOException {
throws IOException { dfs.disallowSnapshot(path);
dfs.disallowSnapshot(snapshotRoot);
} }
@Override @Override
public void createSnapshot(String snapshotName, String snapshotRoot) public void createSnapshot(String snapshotName, String path)
throws IOException { throws IOException {
dfs.createSnapshot(snapshotName, snapshotRoot); dfs.createSnapshot(snapshotName, path);
} }
} }

View File

@ -5543,22 +5543,38 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
} }
getEditLog().logSync(); getEditLog().logSync();
//TODO: need to update metrics in corresponding SnapshotManager method
if (auditLog.isInfoEnabled() && isExternalInvocation()) { if (auditLog.isInfoEnabled() && isExternalInvocation()) {
logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(), logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(),
"allowSnapshot", path, null, null); "allowSnapshot", path, null, null);
} }
} }
// Disallow snapshot on a directory. /** Disallow snapshot on a directory. */
@VisibleForTesting public void disallowSnapshot(String path)
public void disallowSnapshot(String snapshotRoot)
throws SafeModeException, IOException { throws SafeModeException, IOException {
// TODO: implement, also need to update metrics in corresponding writeLock();
// SnapshotManager method try {
checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) {
throw new SafeModeException("Cannot disallow snapshot for " + path,
safeMode);
}
checkOwner(path);
snapshotManager.resetSnapshottable(path);
getEditLog().logDisallowSnapshot(path);
} finally {
writeUnlock();
}
getEditLog().logSync();
//TODO: need to update metrics in corresponding SnapshotManager method
if (auditLog.isInfoEnabled() && isExternalInvocation()) { if (auditLog.isInfoEnabled() && isExternalInvocation()) {
logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(), logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(),
"disallowSnapshot", snapshotRoot, null, null); "disallowSnapshot", path, null, null);
} }
} }

View File

@ -22,6 +22,7 @@ import java.io.IOException;
import java.io.PrintWriter; import java.io.PrintWriter;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
import java.util.Iterator;
import java.util.List; import java.util.List;
import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.UnresolvedLinkException;
@ -30,8 +31,8 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException; import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
import com.google.common.annotations.VisibleForTesting; import com.google.common.annotations.VisibleForTesting;
@ -248,7 +249,7 @@ public class INodeDirectory extends INode {
} }
// Resolve snapshot root // Resolve snapshot root
curNode = ((INodeDirectorySnapshottable) parentDir) curNode = ((INodeDirectorySnapshottable) parentDir)
.getSnapshotINode(components[count + 1]); .getSnapshotRoot(components[count + 1]);
if (index >= -1) { if (index >= -1) {
existing.snapshotRootIndex = existing.size; existing.snapshotRootIndex = existing.size;
} }
@ -601,20 +602,14 @@ public class INodeDirectory extends INode {
*/ */
@VisibleForTesting @VisibleForTesting
protected static void dumpTreeRecursively(PrintWriter out, protected static void dumpTreeRecursively(PrintWriter out,
StringBuilder prefix, List<? extends INode> subs) { StringBuilder prefix, Iterable<? extends INode> subs) {
prefix.append(DUMPTREE_EXCEPT_LAST_ITEM); if (subs != null) {
if (subs != null && subs.size() != 0) { for(final Iterator<? extends INode> i = subs.iterator(); i.hasNext();) {
int i = 0; final INode inode = i.next();
for(; i < subs.size() - 1; i++) { prefix.append(i.hasNext()? DUMPTREE_EXCEPT_LAST_ITEM: DUMPTREE_LAST_ITEM);
subs.get(i).dumpTreeRecursively(out, prefix); inode.dumpTreeRecursively(out, prefix);
prefix.setLength(prefix.length() - 2);
prefix.append(DUMPTREE_EXCEPT_LAST_ITEM);
}
prefix.setLength(prefix.length() - 2);
prefix.append(DUMPTREE_LAST_ITEM);
subs.get(i).dumpTreeRecursively(out, prefix);
}
prefix.setLength(prefix.length() - 2); prefix.setLength(prefix.length() - 2);
} }
} }
}
}

View File

@ -21,6 +21,7 @@ import java.io.IOException;
import java.io.PrintWriter; import java.io.PrintWriter;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
import java.util.Iterator;
import java.util.List; import java.util.List;
import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.HadoopIllegalArgumentException;
@ -30,7 +31,12 @@ import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota; import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Time;
/** Directories where taking snapshots is allowed. */ /**
* Directories where taking snapshots is allowed.
*
* Like other {@link INode} subclasses, this class is synchronized externally
* by the namesystem and FSDirectory locks.
*/
@InterfaceAudience.Private @InterfaceAudience.Private
public class INodeDirectorySnapshottable extends INodeDirectoryWithQuota { public class INodeDirectorySnapshottable extends INodeDirectoryWithQuota {
static public INodeDirectorySnapshottable newInstance( static public INodeDirectorySnapshottable newInstance(
@ -51,27 +57,16 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithQuota {
INode inode, String src) throws IOException { INode inode, String src) throws IOException {
final INodeDirectory dir = INodeDirectory.valueOf(inode, src); final INodeDirectory dir = INodeDirectory.valueOf(inode, src);
if (!dir.isSnapshottable()) { if (!dir.isSnapshottable()) {
throw new SnapshotException(src + " is not a snapshottable directory."); throw new SnapshotException(
"Directory is not a snapshottable directory: " + src);
} }
return (INodeDirectorySnapshottable)dir; return (INodeDirectorySnapshottable)dir;
} }
/** A list of snapshots of this directory. */ /** Snapshots of this directory in ascending order of snapshot id. */
private final List<INodeDirectoryWithSnapshot> snapshots private final List<Snapshot> snapshots = new ArrayList<Snapshot>();
= new ArrayList<INodeDirectoryWithSnapshot>();
public INode getSnapshotINode(byte[] name) { /** Number of snapshots allowed. */
if (snapshots == null || snapshots.size() == 0) {
return null;
}
int low = Collections.binarySearch(snapshots, name);
if (low >= 0) {
return snapshots.get(low);
}
return null;
}
/** Number of snapshots is allowed. */
private int snapshotQuota; private int snapshotQuota;
private INodeDirectorySnapshottable(long nsQuota, long dsQuota, private INodeDirectorySnapshottable(long nsQuota, long dsQuota,
@ -80,14 +75,30 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithQuota {
setSnapshotQuota(snapshotQuota); setSnapshotQuota(snapshotQuota);
} }
int getNumSnapshots() {
return snapshots.size();
}
/** @return the root directory of a snapshot. */
public INodeDirectory getSnapshotRoot(byte[] snapshotName) {
if (snapshots == null || snapshots.size() == 0) {
return null;
}
int low = Collections.binarySearch(snapshots, snapshotName);
if (low >= 0) {
return snapshots.get(low).getRoot();
}
return null;
}
public int getSnapshotQuota() { public int getSnapshotQuota() {
return snapshotQuota; return snapshotQuota;
} }
public void setSnapshotQuota(int snapshotQuota) { public void setSnapshotQuota(int snapshotQuota) {
if (snapshotQuota <= 0) { if (snapshotQuota < 0) {
throw new HadoopIllegalArgumentException( throw new HadoopIllegalArgumentException(
"Cannot set snapshot quota to " + snapshotQuota + " <= 0"); "Cannot set snapshot quota to " + snapshotQuota + " < 0");
} }
this.snapshotQuota = snapshotQuota; this.snapshotQuota = snapshotQuota;
} }
@ -98,8 +109,7 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithQuota {
} }
/** Add a snapshot root under this directory. */ /** Add a snapshot root under this directory. */
INodeDirectoryWithSnapshot addSnapshotRoot(final String name void addSnapshot(final Snapshot s) throws SnapshotException {
) throws SnapshotException {
//check snapshot quota //check snapshot quota
if (snapshots.size() + 1 > snapshotQuota) { if (snapshots.size() + 1 > snapshotQuota) {
throw new SnapshotException("Failed to add snapshot: there are already " throw new SnapshotException("Failed to add snapshot: there are already "
@ -107,14 +117,12 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithQuota {
+ snapshotQuota); + snapshotQuota);
} }
final INodeDirectoryWithSnapshot r = new INodeDirectoryWithSnapshot(name, this); snapshots.add(s);
snapshots.add(r);
//set modification time //set modification time
final long timestamp = Time.now(); final long timestamp = Time.now();
r.setModificationTime(timestamp); s.getRoot().setModificationTime(timestamp);
setModificationTime(timestamp); setModificationTime(timestamp);
return r;
} }
@Override @Override
@ -126,6 +134,28 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithQuota {
out.print(snapshots.size() <= 1 ? " snapshot of " : " snapshots of "); out.print(snapshots.size() <= 1 ? " snapshot of " : " snapshots of ");
out.println(getLocalName()); out.println(getLocalName());
dumpTreeRecursively(out, prefix, snapshots); dumpTreeRecursively(out, prefix, new Iterable<INodeDirectoryWithSnapshot>() {
@Override
public Iterator<INodeDirectoryWithSnapshot> iterator() {
return new Iterator<INodeDirectoryWithSnapshot>() {
final Iterator<Snapshot> i = snapshots.iterator();
@Override
public boolean hasNext() {
return i.hasNext();
}
@Override
public INodeDirectoryWithSnapshot next() {
return i.next().getRoot();
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
});
} }
} }

View File

@ -0,0 +1,43 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import org.apache.hadoop.classification.InterfaceAudience;
/**
 * Snapshot of a sub-tree in the namesystem.
 *
 * A snapshot pairs a numeric id with the root directory of the snapshotted
 * sub-tree. Instances are ordered by snapshot name via {@link #compareTo},
 * which delegates to the root inode (so a {@code List<Snapshot>} can be
 * binary-searched by name bytes).
 */
@InterfaceAudience.Private
public class Snapshot implements Comparable<byte[]> {
/** Snapshot ID. */
private final int id;
/** The root directory of the snapshot. */
private final INodeDirectoryWithSnapshot root;
/**
 * @param id   numeric id of this snapshot (assigned by the snapshot manager)
 * @param name name of the snapshot; becomes the local name of the root inode
 * @param dir  the snapshottable directory being snapshotted; parent of the
 *             snapshot root inode
 */
Snapshot(int id, String name, INodeDirectorySnapshottable dir) {
this.id = id;
this.root = new INodeDirectoryWithSnapshot(name, dir);
}
/** @return the root directory inode of this snapshot. */
INodeDirectoryWithSnapshot getRoot() {
return root;
}
/**
 * Compare this snapshot to a name given as bytes, by delegating to the
 * root inode's comparison (i.e. compare by snapshot name).
 */
@Override
public int compareTo(byte[] bytes) {
return root.compareTo(bytes);
}
}

View File

@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory; import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@ -34,8 +34,11 @@ import org.apache.hadoop.hdfs.server.namenode.INodeSymlink;
public class SnapshotManager implements SnapshotStats { public class SnapshotManager implements SnapshotStats {
private final FSNamesystem namesystem; private final FSNamesystem namesystem;
private final FSDirectory fsdir; private final FSDirectory fsdir;
private AtomicLong numSnapshottableDirs = new AtomicLong();
private AtomicLong numSnapshots = new AtomicLong(); private final AtomicInteger numSnapshottableDirs = new AtomicInteger();
private final AtomicInteger numSnapshots = new AtomicInteger();
private int snapshotID = 0;
/** All snapshottable directories in the namesystem. */ /** All snapshottable directories in the namesystem. */
private final List<INodeDirectorySnapshottable> snapshottables private final List<INodeDirectorySnapshottable> snapshottables
@ -49,17 +52,14 @@ public class SnapshotManager implements SnapshotStats {
/** /**
* Set the given directory as a snapshottable directory. * Set the given directory as a snapshottable directory.
* If the path is already a snapshottable directory, this is a no-op. * If the path is already a snapshottable directory, update the quota.
* Otherwise, the {@link INodeDirectory} of the path is replaced by an
* {@link INodeDirectorySnapshottable}.
*/ */
public void setSnapshottable(final String path, final int snapshotQuota public void setSnapshottable(final String path, final int snapshotQuota
) throws IOException { ) throws IOException {
namesystem.writeLock();
try {
final INodeDirectory d = INodeDirectory.valueOf(fsdir.getINode(path), path); final INodeDirectory d = INodeDirectory.valueOf(fsdir.getINode(path), path);
if (d.isSnapshottable()) { if (d.isSnapshottable()) {
//The directory is already a snapshottable directory. //The directory is already a snapshottable directory.
((INodeDirectorySnapshottable)d).setSnapshotQuota(snapshotQuota);
return; return;
} }
@ -67,12 +67,31 @@ public class SnapshotManager implements SnapshotStats {
= INodeDirectorySnapshottable.newInstance(d, snapshotQuota); = INodeDirectorySnapshottable.newInstance(d, snapshotQuota);
fsdir.replaceINodeDirectory(path, d, s); fsdir.replaceINodeDirectory(path, d, s);
snapshottables.add(s); snapshottables.add(s);
} finally {
namesystem.writeUnlock();
}
numSnapshottableDirs.getAndIncrement(); numSnapshottableDirs.getAndIncrement();
} }
/**
* Set the given snapshottable directory to non-snapshottable.
*
* @throws SnapshotException if there are snapshots in the directory.
*/
public void resetSnapshottable(final String path
) throws IOException {
final INodeDirectorySnapshottable s = INodeDirectorySnapshottable.valueOf(
fsdir.getINode(path), path);
if (s.getNumSnapshots() > 0) {
throw new SnapshotException("The directory " + path + " has snapshot(s). "
+ "Please redo the operation after removing all the snapshots.");
}
final INodeDirectory d = new INodeDirectory(s);
fsdir.replaceINodeDirectory(path, s, d);
snapshottables.remove(s);
numSnapshottableDirs.getAndDecrement();
}
/** /**
* Create a snapshot of the given path. * Create a snapshot of the given path.
* *
@ -81,7 +100,18 @@ public class SnapshotManager implements SnapshotStats {
*/ */
public void createSnapshot(final String snapshotName, final String path public void createSnapshot(final String snapshotName, final String path
) throws IOException { ) throws IOException {
new SnapshotCreation(path).run(snapshotName); // Find the source root directory path where the snapshot is taken.
final INodeDirectorySnapshottable srcRoot
= INodeDirectorySnapshottable.valueOf(fsdir.getINode(path), path);
synchronized(this) {
final Snapshot s = new Snapshot(snapshotID, snapshotName, srcRoot);
srcRoot.addSnapshot(s);
new SnapshotCreation().processRecursively(srcRoot, s.getRoot());
//create success, update id
snapshotID++;
}
numSnapshots.getAndIncrement(); numSnapshots.getAndIncrement();
} }
@ -92,22 +122,6 @@ public class SnapshotManager implements SnapshotStats {
* where N = # files + # directories + # symlinks. * where N = # files + # directories + # symlinks.
*/ */
class SnapshotCreation { class SnapshotCreation {
/** The source root directory path where the snapshot is taken. */
final INodeDirectorySnapshottable srcRoot;
/**
* Constructor.
* @param path The path must be a snapshottable directory.
*/
private SnapshotCreation(final String path) throws IOException {
srcRoot = INodeDirectorySnapshottable.valueOf(fsdir.getINode(path), path);
}
void run(final String name) throws IOException {
final INodeDirectoryWithSnapshot root = srcRoot.addSnapshotRoot(name);
processRecursively(srcRoot, root);
}
/** Process snapshot creation recursively. */ /** Process snapshot creation recursively. */
private void processRecursively(final INodeDirectory srcDir, private void processRecursively(final INodeDirectory srcDir,
final INodeDirectory dstDir) throws IOException { final INodeDirectory dstDir) throws IOException {

View File

@ -107,10 +107,12 @@ public class TestFSDirectory {
for(; (line = in.readLine()) != null; ) { for(; (line = in.readLine()) != null; ) {
line = line.trim(); line = line.trim();
if (!line.contains("snapshot")) {
Assert.assertTrue(line.startsWith(INodeDirectory.DUMPTREE_LAST_ITEM) Assert.assertTrue(line.startsWith(INodeDirectory.DUMPTREE_LAST_ITEM)
|| line.startsWith(INodeDirectory.DUMPTREE_EXCEPT_LAST_ITEM)); || line.startsWith(INodeDirectory.DUMPTREE_EXCEPT_LAST_ITEM));
checkClassName(line); checkClassName(line);
} }
}
LOG.info("Create a new file " + file4); LOG.info("Create a new file " + file4);
DFSTestUtil.createFile(hdfs, file4, 1024, REPLICATION, seed); DFSTestUtil.createFile(hdfs, file4, 1024, REPLICATION, seed);
@ -134,8 +136,7 @@ public class TestFSDirectory {
int i = line.lastIndexOf('('); int i = line.lastIndexOf('(');
int j = line.lastIndexOf('@'); int j = line.lastIndexOf('@');
final String classname = line.substring(i+1, j); final String classname = line.substring(i+1, j);
Assert.assertTrue(classname.equals(INodeFile.class.getSimpleName()) Assert.assertTrue(classname.startsWith(INodeFile.class.getSimpleName())
|| classname.equals(INodeDirectory.class.getSimpleName()) || classname.startsWith(INodeDirectory.class.getSimpleName()));
|| classname.equals(INodeDirectoryWithQuota.class.getSimpleName()));
} }
} }

View File

@ -89,10 +89,19 @@ public class TestSnapshotPathINodes {
// After a directory is snapshottable // After a directory is snapshottable
hdfs.allowSnapshot(path); hdfs.allowSnapshot(path);
{
final INode after = fsdir.getINode(path); final INode after = fsdir.getINode(path);
Assert.assertTrue(after instanceof INodeDirectorySnapshottable); Assert.assertTrue(after instanceof INodeDirectorySnapshottable);
} }
hdfs.disallowSnapshot(path);
{
final INode after = fsdir.getINode(path);
Assert.assertTrue(after instanceof INodeDirectory);
Assert.assertFalse(after instanceof INodeDirectorySnapshottable);
}
}
/** /**
* Test {@link INodeDirectory#getExistingPathINodes(byte[][], int, boolean)} * Test {@link INodeDirectory#getExistingPathINodes(byte[][], int, boolean)}
* for normal (non-snapshot) file. * for normal (non-snapshot) file.