HDFS-4735. DisallowSnapshot throws IllegalStateException for nested snapshottable directories. Contributed by Jing Zhao
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1471214 13f79535-47bb-0310-9956-ffa450edef68
commit 65752c09ab
parent aa7dd50c66
@@ -269,3 +269,6 @@ Branch-2802 Snapshot (Unreleased)
 
   HDFS-4719. Remove AbstractINodeDiff.Factory and move its methods to
   AbstractINodeDiffList.  (Arpit Agarwal via szetszwo)
+
+  HDFS-4735. DisallowSnapshot throws IllegalStateException for nested
+  snapshottable directories.  (Jing Zhao via szetszwo)

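For context, a minimal client-side sketch of the sequence this commit fixes; it mirrors the regression test added to TestNestedSnapshots below. The path names, the default Configuration, and running against a live cluster built from this branch are illustrative assumptions, not part of the commit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class NestedSnapshotRepro {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at a running HDFS cluster from this branch.
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

    Path dir = new Path("/dir");
    Path sub = new Path(dir, "sub");
    dfs.mkdirs(sub);

    // Snapshotting the parent turns /dir/sub into an INodeDirectoryWithSnapshot.
    dfs.allowSnapshot(dir);
    dfs.createSnapshot(dir, "s1");

    // Nest a second snapshottable directory under the first one.
    dfs.allowSnapshot(sub);

    // Before this commit, resetting the nested directory hit the precondition
    // removed from INodeDirectory below and surfaced as IllegalStateException;
    // with the fix it succeeds.
    dfs.disallowSnapshot(sub);
  }
}
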
@@ -5829,8 +5829,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       writeUnlock();
     }
     getEditLog().logSync();
     
-    //TODO: need to update metrics in corresponding SnapshotManager method
-    
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
       logAuditEvent(true, "allowSnapshot", path, null, null);

@@ -5855,8 +5853,6 @@
       writeUnlock();
     }
     getEditLog().logSync();
     
-    //TODO: need to update metrics in corresponding SnapshotManager method
-    
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
       logAuditEvent(true, "disallowSnapshot", path, null, null);

@@ -173,8 +173,6 @@ public class INodeDirectory extends INodeWithAdditionalFields {
 
   /** Replace itself with an {@link INodeDirectoryWithSnapshot}. */
   public INodeDirectoryWithSnapshot replaceSelf4INodeDirectoryWithSnapshot() {
-    Preconditions.checkState(!(this instanceof INodeDirectoryWithSnapshot),
-        "this is already an INodeDirectoryWithSnapshot, this=%s", this);
     return replaceSelf(new INodeDirectoryWithSnapshot(this));
   }
 

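A note on where the IllegalStateException in the issue title originates: Guava's Preconditions.checkState throws IllegalStateException when its condition is false, and a nested snapshottable directory is an INodeDirectorySnapshottable, a subclass of INodeDirectoryWithSnapshot, so it could never pass the instanceof check this hunk removes. A standalone sketch of that behavior, using illustrative stand-in classes rather than the real HDFS types:

import com.google.common.base.Preconditions;

public class CheckStateDemo {
  // Stand-ins for INodeDirectoryWithSnapshot and INodeDirectorySnapshottable.
  static class DirWithSnapshot {}
  static class SnapshottableDir extends DirWithSnapshot {}

  public static void main(String[] args) {
    Object self = new SnapshottableDir(); // a nested snapshottable directory
    // Same shape as the removed precondition: the subclass relationship makes
    // the condition false, so checkState throws IllegalStateException.
    Preconditions.checkState(!(self instanceof DirWithSnapshot),
        "this is already an INodeDirectoryWithSnapshot, this=%s", self);
  }
}
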
@@ -271,7 +271,7 @@ public class Diff<K, E extends Diff.Element<K>> {
         // Case 1.1.3 and 2.3.3: element is already in c-list,
         previous = created.set(c, newElement);
 
-        //TODO: fix a bug that previous != oldElement.Set it to oldElement for now
+        // For previous != oldElement, set it to oldElement
         previous = oldElement;
       } else {
         d = search(deleted, oldElement.getKey());

@@ -173,7 +173,7 @@ public class TestFSImageWithSnapshot {
    * 6. Dump the FSDirectory again and compare the two dumped string.
    * </pre>
    */
-  @Test (timeout=60000)
+  @Test
   public void testSaveLoadImage() throws Exception {
     int s = 0;
     // make changes to the namesystem

@@ -213,8 +213,9 @@ public class TestFSImageWithSnapshot {
     hdfs.rename(sub2file2, sub1_sub2file2);
     
     hdfs.rename(sub1file1, sub2file1);
-    // TODO: fix case hdfs.rename(sub1file1, sub1file2);
+    checkImage(s);
 
+    hdfs.rename(sub2file1, sub2file2);
     checkImage(s);
   }
 

@@ -148,8 +148,6 @@ public class TestDisallowModifyROSnapshot {
   public void testCreateSymlink() throws Exception {
     @SuppressWarnings("deprecation")
     DFSClient dfsclient = new DFSClient(conf);
-    // TODO: if link is objInSnapshot, ParentNotDirectoryException got thrown
-    // first by verifyParentDir()
     dfsclient.createSymlink(sub2.toString(), "/TestSnapshot/sub1/.snapshot",
         false);
   }

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable.SNAPSHOT_LIMIT;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.Random;

@@ -34,11 +35,13 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
+import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.ipc.RemoteException;
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Assert;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.Test;
 
 /** Testing nested snapshots. */

@@ -57,16 +60,16 @@ public class TestNestedSnapshots {
   private static MiniDFSCluster cluster;
   private static DistributedFileSystem hdfs;
 
-  @BeforeClass
-  public static void setUp() throws Exception {
+  @Before
+  public void setUp() throws Exception {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
         .build();
     cluster.waitActive();
     hdfs = cluster.getFileSystem();
   }
 
-  @AfterClass
-  public static void tearDown() throws Exception {
+  @After
+  public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }

@@ -279,4 +282,32 @@
       }
     }
   }
+
+  /**
+   * When we have nested snapshottable directories and if we try to reset the
+   * snapshottable descendant back to an regular directory, we need to replace
+   * the snapshottable descendant with an INodeDirectoryWithSnapshot
+   */
+  @Test
+  public void testDisallowNestedSnapshottableDir() throws Exception {
+    final Path dir = new Path("/dir");
+    final Path sub = new Path(dir, "sub");
+    hdfs.mkdirs(sub);
+
+    SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
+    final Path file = new Path(sub, "file");
+    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);
+
+    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
+    INode subNode = fsdir.getINode(sub.toString());
+    assertTrue(subNode instanceof INodeDirectoryWithSnapshot);
+
+    hdfs.allowSnapshot(sub);
+    subNode = fsdir.getINode(sub.toString());
+    assertTrue(subNode instanceof INodeDirectorySnapshottable);
+
+    hdfs.disallowSnapshot(sub);
+    subNode = fsdir.getINode(sub.toString());
+    assertTrue(subNode instanceof INodeDirectoryWithSnapshot);
+  }
 }

@@ -411,7 +411,6 @@ public class TestSnapshot {
    * the owner, and the other indicates the group
    */
   private String[] genRandomOwner() {
-    // TODO
     String[] userGroup = new String[]{"dr.who", "unknown"};
     return userGroup;
   }

@@ -24,7 +24,6 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
-import java.util.Arrays;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;

@@ -46,9 +45,6 @@ import org.junit.Test;
  * Test cases for snapshot-related information in blocksMap.
  */
 public class TestSnapshotBlocksMap {
-  // TODO: fix concat for snapshot
-  private static final boolean runConcatTest = false;
-
   private static final long seed = 0;
   private static final short REPLICATION = 3;
   private static final int BLOCKSIZE = 1024;

@@ -208,36 +204,5 @@ public class TestSnapshotBlocksMap {
     } catch (IOException e) {
       assertExceptionContains("File does not exist: " + s1f0, e);
     }
-
-    // concat file1, file3 and file5 to file4
-    if (runConcatTest) {
-      final INodeFile f1 = assertBlockCollection(file1.toString(), 2, fsdir,
-          blockmanager);
-      final BlockInfo[] f1blocks = f1.getBlocks();
-      final INodeFile f3 = assertBlockCollection(file3.toString(), 5, fsdir,
-          blockmanager);
-      final BlockInfo[] f3blocks = f3.getBlocks();
-      final INodeFile f5 = assertBlockCollection(file5.toString(), 7, fsdir,
-          blockmanager);
-      final BlockInfo[] f5blocks = f5.getBlocks();
-      assertBlockCollection(file4.toString(), 1, fsdir, blockmanager);
-
-      hdfs.concat(file4, new Path[]{file1, file3, file5});
-
-      final INodeFile f4 = assertBlockCollection(file4.toString(), 15, fsdir,
-          blockmanager);
-      final BlockInfo[] blocks4 = f4.getBlocks();
-      for(BlockInfo[] blocks : Arrays.asList(f1blocks, f3blocks, blocks4, f5blocks)) {
-        for(BlockInfo b : blocks) {
-          assertBlockCollection(blockmanager, f4, b);
-        }
-      }
-      assertAllNull(f1, file1, snapshots);
-      assertAllNull(f3, file3, snapshots);
-      assertAllNull(f5, file5, snapshots);
-    }
   }
-
-  // TODO: test for deletion file which was appended after taking snapshots
-
 }

@@ -213,7 +213,6 @@ public class TestSnapshotReplication {
     checkFileReplication(file1, REPLICATION, REPLICATION);
     checkSnapshotFileReplication(file1, snapshotRepMap, REPLICATION);
 
-    // TODO: check replication after deleting snapshot(s)
     // Delete file1
     hdfs.delete(file1, true);
     // Check replication of snapshots