HDFS-4119. Complete the allowSnapshot code and add a test for it.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1403087 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent
9e26fdcda7
commit
f60a844e7a
|
@ -35,3 +35,5 @@ Branch-2802 Snapshot (Unreleased)
|
|||
block list. (szetszwo)
|
||||
|
||||
HDFS-4111. Support snapshot of subtrees. (szetszwo via suresh)
|
||||
|
||||
HDFS-4119. Complete the allowSnapshot code and add a test for it. (szetszwo)
|
||||
|
|
|
@ -879,15 +879,15 @@ public class FSEditLog implements LogsPurgeable {
|
|||
logEdit(op);
|
||||
}
|
||||
|
||||
void logAllowSnapshot(String snapName, String snapRoot) {
|
||||
void logAllowSnapshot(String path) {
|
||||
AllowSnapshotOp op = AllowSnapshotOp.getInstance(cache.get())
|
||||
.setSnapshotRoot(snapRoot);
|
||||
.setSnapshotRoot(path);
|
||||
logEdit(op);
|
||||
}
|
||||
|
||||
void logDisallowSnapshot(String snapName, String snapRoot) {
|
||||
void logDisallowSnapshot(String path) {
|
||||
DisallowSnapshotOp op = DisallowSnapshotOp.getInstance(cache.get())
|
||||
.setSnapshotRoot(snapRoot);
|
||||
.setSnapshotRoot(path);
|
||||
logEdit(op);
|
||||
}
|
||||
|
||||
|
|
|
@ -114,6 +114,11 @@ public abstract class FSEditLogOp {
|
|||
inst.put(OP_END_LOG_SEGMENT,
|
||||
new LogSegmentOp(OP_END_LOG_SEGMENT));
|
||||
inst.put(OP_UPDATE_BLOCKS, new UpdateBlocksOp());
|
||||
|
||||
inst.put(OP_ALLOW_SNAPSHOT, new AllowSnapshotOp());
|
||||
inst.put(OP_DISALLOW_SNAPSHOT, new DisallowSnapshotOp());
|
||||
inst.put(OP_CREATE_SNAPSHOT, new CreateSnapshotOp());
|
||||
inst.put(OP_DELETE_SNAPSHOT, new DeleteSnapshotOp());
|
||||
}
|
||||
|
||||
public FSEditLogOp get(FSEditLogOpCodes opcode) {
|
||||
|
|
|
@ -5463,6 +5463,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
public BlockManager getBlockManager() {
|
||||
return blockManager;
|
||||
}
|
||||
/** @return the FSDirectory. */
|
||||
public FSDirectory getFSDirectory() {
|
||||
return dir;
|
||||
}
|
||||
|
||||
/**
|
||||
* Verifies that the given identifier and password are valid and match.
|
||||
|
@ -5510,11 +5514,25 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
.isAvoidingStaleDataNodesForWrite();
|
||||
}
|
||||
|
||||
// Allow snapshot on a directory.
|
||||
@VisibleForTesting
|
||||
public void allowSnapshot(String snapshotRoot)
|
||||
throws SafeModeException, IOException {
|
||||
// TODO: implement
|
||||
/** Allow snapshot on a directory. */
|
||||
public void allowSnapshot(String path) throws SafeModeException, IOException {
|
||||
writeLock();
|
||||
try {
|
||||
checkOperation(OperationCategory.WRITE);
|
||||
if (isInSafeMode()) {
|
||||
throw new SafeModeException("Cannot allow snapshot for " + path, safeMode);
|
||||
}
|
||||
checkOwner(path);
|
||||
|
||||
//TODO: do not hardcode snapshot quota value
|
||||
snapshotManager.setSnapshottable(path, 256);
|
||||
getEditLog().logAllowSnapshot(path);
|
||||
} finally {
|
||||
writeUnlock();
|
||||
}
|
||||
getEditLog().logSync();
|
||||
|
||||
//TODO: audit log
|
||||
}
|
||||
|
||||
// Disallow snapshot on a directory.
|
||||
|
@ -5542,6 +5560,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
dir.writeLock();
|
||||
try {
|
||||
snapshotManager.createSnapshot(snapshotName, path);
|
||||
getEditLog().logCreateSnapshot(snapshotName, path);
|
||||
} finally {
|
||||
dir.writeUnlock();
|
||||
}
|
||||
|
|
|
@ -81,7 +81,7 @@ public class SnapshotManager {
|
|||
}
|
||||
|
||||
/**
|
||||
* Create a snapshot of subtrees for recursively copying the directory
|
||||
* Create a snapshot of subtrees by recursively copying the directory
|
||||
* structure from the source directory to the snapshot destination directory.
|
||||
* This creation algorithm requires O(N) running time and O(N) memory,
|
||||
* where N = # files + # directories + # symlinks.
|
||||
|
|
|
@ -0,0 +1,87 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.server.namenode.snapshot;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hdfs.DFSTestUtil;
|
||||
import org.apache.hadoop.hdfs.DistributedFileSystem;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
|
||||
import org.junit.After;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
/** Test snapshot related operations. */
|
||||
public class TestSnapshot {
|
||||
private static final long seed = 0;
|
||||
private static final short REPLICATION = 3;
|
||||
|
||||
private final Path dir = new Path("/TestSnapshot");
|
||||
|
||||
private final Path sub1 = new Path(dir, "sub1");
|
||||
private final Path file1 = new Path(sub1, "file1");
|
||||
private final Path file2 = new Path(sub1, "file2");
|
||||
|
||||
private Configuration conf;
|
||||
private MiniDFSCluster cluster;
|
||||
private FSNamesystem fsn;
|
||||
private FSDirectory fsdir;
|
||||
|
||||
private DistributedFileSystem hdfs;
|
||||
|
||||
@Before
|
||||
public void setUp() throws Exception {
|
||||
conf = new Configuration();
|
||||
cluster = new MiniDFSCluster.Builder(conf)
|
||||
.numDataNodes(REPLICATION)
|
||||
.build();
|
||||
cluster.waitActive();
|
||||
|
||||
fsn = cluster.getNamesystem();
|
||||
fsdir = fsn.getFSDirectory();
|
||||
|
||||
hdfs = cluster.getFileSystem();
|
||||
DFSTestUtil.createFile(hdfs, file1, 1024, REPLICATION, seed);
|
||||
DFSTestUtil.createFile(hdfs, file2, 1024, REPLICATION, seed);
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDown() throws Exception {
|
||||
if (cluster != null) {
|
||||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
|
||||
/** Test allow-snapshot operation. */
|
||||
@Test
|
||||
public void testAllowSnapshot() throws Exception {
|
||||
final String path = sub1.toString();
|
||||
final INode before = fsdir.getINode(path);
|
||||
Assert.assertTrue(before instanceof INodeDirectory);
|
||||
Assert.assertFalse(before instanceof INodeDirectorySnapshottable);
|
||||
|
||||
hdfs.allowSnapshot(path);
|
||||
final INode after = fsdir.getINode(path);
|
||||
Assert.assertTrue(after instanceof INodeDirectorySnapshottable);
|
||||
}
|
||||
}
|
Loading…
Reference in New Issue