HDFS-4079. Add SnapshotManager which maintains a list for all the snapshottable directories and supports snapshot methods such as setting a directory to snapshottable and creating a snapshot.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1400728 13f79535-47bb-0310-9956-ffa450edef68
parent 625d7cf20b
commit 820b5495ca
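For orientation, a minimal usage sketch of the API this patch introduces (not part of the commit itself): SnapshotManager.setSnapshottable(path, fsdir) and createSnapshot(snapshotName, path, fsdir) are the methods added below, while the demo class, path, and snapshot name here are illustrative assumptions.

import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;

/** Hypothetical caller sketch; not part of this patch. */
class SnapshotManagerDemo {
  static void demo(SnapshotManager sm, FSDirectory fsdir) throws IOException {
    // Replace the INodeDirectory at the path with an INodeDirectorySnapshottable
    // and register it with the manager; a no-op if it is already snapshottable.
    sm.setSnapshottable("/user/alice/data", fsdir);

    // Resolve the path to the snapshottable directory and add a snapshot root
    // named "s0" under it (copying the subtree is still a TODO in this patch).
    sm.createSnapshot("s0", "/user/alice/data", fsdir);
  }
}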
@@ -14,3 +14,7 @@ Branch-2802 Snapshot (Unreleased)
 
   HDFS-4087. Protocol changes for listSnapshots functionality.
   (Brandon Li via suresh)
+
+  HDFS-4079. Add SnapshotManager which maintains a list for all the
+  snapshottable directories and supports snapshot methods such as setting a
+  directory to snapshottable and creating a snapshot.  (szetszwo)
FSDirectory.java
@@ -1304,7 +1304,7 @@ public class FSDirectory implements Closeable {
   /**
    * Get {@link INode} associated with the file / directory.
    */
-  INode getINode(String src) throws UnresolvedLinkException {
+  public INode getINode(String src) throws UnresolvedLinkException {
     readLock();
     try {
       INode iNode = rootDir.getNode(src, true);
FSNamesystem.java
@@ -169,7 +169,7 @@ import org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer;
 import org.apache.hadoop.hdfs.server.namenode.ha.StandbyState;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -307,6 +307,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   /** The namespace tree. */
   FSDirectory dir;
   private final BlockManager blockManager;
+  private final SnapshotManager snapshotManager;
   private final DatanodeStatistics datanodeStatistics;
 
   // Block pool ID used by this namenode
@ -464,6 +465,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
||||||
DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT);
|
DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT);
|
||||||
|
|
||||||
this.blockManager = new BlockManager(this, this, conf);
|
this.blockManager = new BlockManager(this, this, conf);
|
||||||
|
this.snapshotManager = new SnapshotManager(this);
|
||||||
this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();
|
this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();
|
||||||
|
|
||||||
this.fsOwner = UserGroupInformation.getCurrentUser();
|
this.fsOwner = UserGroupInformation.getCurrentUser();
|
||||||
|
@@ -2963,29 +2965,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     getEditLog().logSync();
   }
 
-  /**
-   * Set the given directory as a snapshottable directory.
-   * If the path is already a snapshottable directory, this is a no-op.
-   * Otherwise, the {@link INodeDirectory} of the path is replaced by an
-   * {@link INodeDirectorySnapshottable}.
-   */
-  void setSnapshottable(final String path) throws IOException {
-    writeLock();
-    try {
-      final INodeDirectory d = INodeDirectory.valueOf(dir.getINode(path), path);
-      if (d.isSnapshottable()) {
-        //The directory is already a snapshottable directory.
-        return;
-      }
-
-      final INodeDirectorySnapshottable s
-          = INodeDirectorySnapshottable.newInstance(d);
-      dir.replaceINodeDirectory(path, d, s);
-    } finally {
-      writeUnlock();
-    }
-  }
-
   /** Persist all metadata about this file.
    * @param src The string representation of the path
    * @param clientName The string representation of the client
INode.java
@@ -38,7 +38,7 @@ import com.google.common.primitives.SignedBytes;
  * directory inodes.
  */
 @InterfaceAudience.Private
-abstract class INode implements Comparable<byte[]> {
+public abstract class INode implements Comparable<byte[]> {
   /*
    * The inode name is in java UTF8 encoding;
    * The name in HdfsFileStatus should keep the same encoding as this.
@@ -135,7 +135,7 @@ abstract class INode implements Comparable<byte[]> {
     setPermission(ps.getPermission());
   }
   /** Get the {@link PermissionStatus} */
-  protected PermissionStatus getPermissionStatus() {
+  public PermissionStatus getPermissionStatus() {
     return new PermissionStatus(getUserName(),getGroupName(),getFsPermission());
   }
   private void updatePermissionStatus(PermissionStatusFormat f, long n) {
@@ -246,7 +246,7 @@ abstract class INode implements Comparable<byte[]> {
   /**
    * Set local file name
    */
-  void setLocalName(String name) {
+  protected void setLocalName(String name) {
     this.name = DFSUtil.string2Bytes(name);
   }
 
@@ -288,7 +288,7 @@ abstract class INode implements Comparable<byte[]> {
   /**
    * Set last modification time of inode.
    */
-  void setModificationTime(long modtime) {
+  public void setModificationTime(long modtime) {
     assert isDirectory();
     if (this.modificationTime <= modtime) {
       this.modificationTime = modtime;
INodeDirectory.java
@@ -50,7 +50,7 @@ public class INodeDirectory extends INode {
 
   private List<INode> children;
 
-  INodeDirectory(String name, PermissionStatus permissions) {
+  protected INodeDirectory(String name, PermissionStatus permissions) {
     super(name, permissions);
     this.children = null;
   }
INodeDirectorySnapshotRoot.java (new file)
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
+
+/** The root directory of a snapshot. */
+public class INodeDirectorySnapshotRoot extends INodeDirectory {
+  INodeDirectorySnapshotRoot(String name, INodeDirectory dir) {
+    super(name, dir.getPermissionStatus());
+    setLocalName(name);
+    parent = dir;
+  }
+}
INodeDirectorySnapshottable.java
@@ -17,9 +17,15 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
+import org.apache.hadoop.util.Time;
 
 /** Directories where taking snapshots is allowed. */
 @InterfaceAudience.Private
@@ -36,6 +42,20 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithQuota {
     return new INodeDirectorySnapshottable(nsq, dsq, dir);
   }
 
+  /** Cast INode to INodeDirectorySnapshottable. */
+  static public INodeDirectorySnapshottable valueOf(
+      INode inode, String src) throws IOException {
+    final INodeDirectory dir = INodeDirectory.valueOf(inode, src);
+    if (!dir.isSnapshottable()) {
+      throw new SnapshotException(src + " is not a snapshottable directory.");
+    }
+    return (INodeDirectorySnapshottable)dir;
+  }
+
+  /** A list of snapshots of this directory. */
+  private final List<INodeDirectorySnapshotRoot> snapshots
+      = new ArrayList<INodeDirectorySnapshotRoot>();
+
   private INodeDirectorySnapshottable(long nsQuota, long dsQuota,
       INodeDirectory dir) {
     super(nsQuota, dsQuota, dir);
@@ -45,4 +65,16 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithQuota {
   public boolean isSnapshottable() {
     return true;
   }
+
+  /** Add a snapshot root under this directory. */
+  INodeDirectorySnapshotRoot addSnapshotRoot(final String name) {
+    final INodeDirectorySnapshotRoot r = new INodeDirectorySnapshotRoot(name, this);
+    snapshots.add(r);
+
+    //set modification time
+    final long timestamp = Time.now();
+    r.setModificationTime(timestamp);
+    setModificationTime(timestamp);
+    return r;
+  }
 }
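As a quick illustration of the two helpers added above, here is a hedged sketch of the lookup-then-snapshot pattern that SnapshotManager.createSnapshot() (later in this patch) follows. It assumes same-package access, since addSnapshotRoot() is package-private, and the path and snapshot name are placeholders.

package org.apache.hadoop.hdfs.server.namenode.snapshot;

import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.INode;

/** Hypothetical same-package sketch; not part of this patch. */
class SnapshottableSketch {
  static void snapshotOnce(FSDirectory fsdir) throws IOException {
    final INode inode = fsdir.getINode("/user/alice/data");
    // valueOf() throws SnapshotException if the directory was never made snapshottable.
    final INodeDirectorySnapshottable dir =
        INodeDirectorySnapshottable.valueOf(inode, "/user/alice/data");
    // addSnapshotRoot() records the new snapshot root in the snapshots list and
    // stamps Time.now() on both the root and its parent directory.
    final INodeDirectorySnapshotRoot root = dir.addSnapshotRoot("s0");
  }
}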
SnapshotException.java (new file)
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import java.io.IOException;
+
+/** Snapshot related exception. */
+public class SnapshotException extends IOException {
+  private static final long serialVersionUID = 1L;
+
+  SnapshotException(final String message) {
+    super(message);
+  }
+
+  SnapshotException(final Throwable cause) {
+    super(cause);
+  }
+}
SnapshotManager.java (new file)
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
+import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+
+/** Manage snapshottable directories and their snapshots. */
+public class SnapshotManager {
+  private final Namesystem namesystem;
+
+  /** All snapshottable directories in the namesystem. */
+  private final List<INodeDirectorySnapshottable> snapshottables
+      = new ArrayList<INodeDirectorySnapshottable>();
+
+  public SnapshotManager(final Namesystem namesystem) {
+    this.namesystem = namesystem;
+  }
+
+  /**
+   * Set the given directory as a snapshottable directory.
+   * If the path is already a snapshottable directory, this is a no-op.
+   * Otherwise, the {@link INodeDirectory} of the path is replaced by an
+   * {@link INodeDirectorySnapshottable}.
+   */
+  public void setSnapshottable(final String path,
+      final FSDirectory fsdir) throws IOException {
+    namesystem.writeLock();
+    try {
+      final INodeDirectory d = INodeDirectory.valueOf(fsdir.getINode(path), path);
+      if (d.isSnapshottable()) {
+        //The directory is already a snapshottable directory.
+        return;
+      }
+
+      final INodeDirectorySnapshottable s
+          = INodeDirectorySnapshottable.newInstance(d);
+      fsdir.replaceINodeDirectory(path, d, s);
+      snapshottables.add(s);
+    } finally {
+      namesystem.writeUnlock();
+    }
+  }
+
+  /** Create a snapshot of given path. */
+  public void createSnapshot(final String snapshotName, final String path,
+      final FSDirectory fsdir) throws IOException {
+    final INodeDirectorySnapshottable d = INodeDirectorySnapshottable.valueOf(
+        fsdir.getINode(path), path);
+
+    //TODO: check ns quota
+
+    final INodeDirectorySnapshotRoot root = d.addSnapshotRoot(snapshotName);
+
+    //TODO: create the remaining subtree
+  }
+}
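One caller-visible behavior worth noting: createSnapshot() rejects a directory that was never made snapshottable. A fragment-style sketch of that error path, assuming a SnapshotManager "sm" and an FSDirectory "fsdir" in scope; the path is a placeholder.

try {
  sm.createSnapshot("s0", "/tmp/plain-dir", fsdir);
} catch (SnapshotException e) {
  // Message comes from INodeDirectorySnapshottable.valueOf():
  // "/tmp/plain-dir is not a snapshottable directory."
  System.err.println("snapshot rejected: " + e.getMessage());
}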