HDFS-4144. Create test for all snapshot-related metrics. Contributed by Jing Zhao.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1455805 13f79535-47bb-0310-9956-ffa450edef68
parent 3b3ea5c422
commit 719c313be1
@@ -195,3 +195,6 @@ Branch-2802 Snapshot (Unreleased)
   HDFS-4563. Update namespace/diskspace usage after deleting snapshots.
   (Jing Zhao via szetszwo)

+  HDFS-4144. Create test for all snapshot-related metrics.
+  (Jing Zhao via suresh)
+
@@ -324,8 +324,8 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
       Quota.Counts counts = cleanSubtree(snapshot, prior, collectedBlocks);
       INodeDirectory parent = getParent();
       if (parent != null) {
-        parent.addSpaceConsumed(counts.get(Quota.NAMESPACE),
-            counts.get(Quota.DISKSPACE));
+        parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
+            -counts.get(Quota.DISKSPACE));
       }
     } catch(QuotaExceededException e) {
       LOG.error("BUG: removeSnapshot increases namespace usage.", e);
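Note on the INodeDirectorySnapshottable hunk: deleting a snapshot releases the namespace and diskspace that the snapshot held, so the freed counts must be subtracted from, not added to, the parent's consumed quota. A minimal sketch of that accounting, using a hypothetical simplified Directory class (names namespaceUsed, diskspaceUsed, onSnapshotDeleted are illustrative only, not the real INode hierarchy):

// Hypothetical, simplified sketch of the quota accounting fixed above.
class Directory {
  long namespaceUsed;   // inodes charged to this directory
  long diskspaceUsed;   // bytes charged to this directory
  Directory parent;

  /** Add (possibly negative) deltas to this directory and its ancestors. */
  void addSpaceConsumed(long nsDelta, long dsDelta) {
    namespaceUsed += nsDelta;
    diskspaceUsed += dsDelta;
    if (parent != null) {
      parent.addSpaceConsumed(nsDelta, dsDelta);
    }
  }

  /** Deleting a snapshot frees these counts, so the deltas are negated. */
  void onSnapshotDeleted(long freedNamespace, long freedDiskspace) {
    addSpaceConsumed(-freedNamespace, -freedDiskspace);
  }
}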
@@ -253,6 +253,8 @@ public class SnapshotManager implements SnapshotStats {
       List<INodeDirectorySnapshottable> toRemoveList) {
     if (toRemoveList != null) {
       this.snapshottables.removeAll(toRemoveList);
+      // modify the numSnapshottableDirs metrics
+      numSnapshottableDirs.addAndGet(-toRemoveList.size());
     }
   }

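The SnapshotManager hunk keeps the numSnapshottableDirs metric in step with the in-memory list of snapshottable directories. A minimal sketch of the same pattern, assuming an AtomicInteger-backed counter (the field's exact type is not visible in this hunk, so this is an assumption):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative only: a manager that mirrors list membership into a metric.
class SnapshottableDirTracker {
  private final List<String> snapshottables = new ArrayList<>();
  private final AtomicInteger numSnapshottableDirs = new AtomicInteger(0);

  synchronized void add(String dir) {
    snapshottables.add(dir);
    numSnapshottableDirs.incrementAndGet();
  }

  /** Remove several directories and decrement the metric by the same amount. */
  synchronized void removeAll(List<String> toRemoveList) {
    if (toRemoveList != null) {
      snapshottables.removeAll(toRemoveList);
      numSnapshottableDirs.addAndGet(-toRemoveList.size());
    }
  }

  int getNumSnapshottableDirs() {
    return numSnapshottableDirs.get();
  }
}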
@@ -19,8 +19,10 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;

 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;

 import java.io.File;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumSet;
@@ -45,7 +47,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDirectoryTree;
-import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.junit.After;
@@ -266,7 +268,7 @@ public class TestSnapshot {
    * A simple test that updates a sub-directory of a snapshottable directory
    * with snapshots
    */
-  @Test (timeout=300000)
+  @Test (timeout=60000)
   public void testUpdateDirectory() throws Exception {
     Path dir = new Path("/dir");
     Path sub = new Path(dir, "sub");
@@ -288,11 +290,8 @@ public class TestSnapshot {

   /**
    * Creating snapshots for a directory that is not snapshottable must fail.
-   *
-   * TODO: Listing/Deleting snapshots for a directory that is not snapshottable
-   * should also fail.
    */
-  @Test (timeout=300000)
+  @Test (timeout=60000)
   public void testSnapshottableDirectory() throws Exception {
     Path dir = new Path("/TestSnapshot/sub");
     Path file0 = new Path(dir, "file0");
@@ -300,10 +299,29 @@
     DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
     DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);

-    exception.expect(RemoteException.class);
-    exception.expectMessage("Directory is not a snapshottable directory: "
-        + dir.toString());
-    hdfs.createSnapshot(dir, "s1");
+    try {
+      hdfs.createSnapshot(dir, "s1");
+      fail("Exception expected: " + dir + " is not snapshottable");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains(
+          "Directory is not a snapshottable directory: " + dir, e);
+    }
+
+    try {
+      hdfs.deleteSnapshot(dir, "s1");
+      fail("Exception expected: " + dir + " is not a snapshottale dir");
+    } catch (Exception e) {
+      GenericTestUtils.assertExceptionContains(
+          "Directory is not a snapshottable directory: " + dir, e);
+    }
+
+    try {
+      hdfs.renameSnapshot(dir, "s1", "s2");
+      fail("Exception expected: " + dir + " is not a snapshottale dir");
+    } catch (Exception e) {
+      GenericTestUtils.assertExceptionContains(
+          "Directory is not a snapshottable directory: " + dir, e);
+    }
   }

   /**
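This hunk exercises createSnapshot, deleteSnapshot, and renameSnapshot against a non-snapshottable directory using an explicit try/fail/catch pattern with GenericTestUtils.assertExceptionContains, instead of JUnit's ExpectedException rule (which can only verify a single throwing call per test). A minimal, self-contained sketch of the same assertion idiom, with a made-up forbiddenOperation standing in for the HDFS calls:

import static org.junit.Assert.fail;

import java.io.IOException;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;

// Illustrative only: the try/fail/assertExceptionContains idiom used above,
// applied to a hypothetical operation instead of DistributedFileSystem calls.
public class ExceptionMessageCheckExample {

  private void forbiddenOperation() throws IOException {
    throw new IOException("Directory is not a snapshottable directory: /dir");
  }

  @Test(timeout = 60000)
  public void testOperationMustFail() throws Exception {
    try {
      forbiddenOperation();
      fail("Exception expected: /dir is not snapshottable");
    } catch (IOException e) {
      // Passes only if the message contains the expected fragment;
      // otherwise an assertion error with the original stack trace is thrown.
      GenericTestUtils.assertExceptionContains(
          "Directory is not a snapshottable directory: /dir", e);
    }
  }
}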
@@ -697,6 +697,7 @@ public class TestSnapshotDeletion {

     // make changes on sub
     hdfs.delete(subFile1, true);
+    checkQuotaUsageComputation(new Path("/"), 16, BLOCKSIZE * 11);
     checkQuotaUsageComputation(dir, 15, BLOCKSIZE * 11);
     checkQuotaUsageComputation(sub, 13, BLOCKSIZE * 11);

@@ -717,6 +718,7 @@ public class TestSnapshotDeletion {

     // delete snapshot s2
     hdfs.deleteSnapshot(dir, "s2");
+    checkQuotaUsageComputation(new Path("/"), 14, BLOCKSIZE * 11);
     checkQuotaUsageComputation(dir, 13, BLOCKSIZE * 11);
     checkQuotaUsageComputation(sub, 12, BLOCKSIZE * 11);

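The two TestSnapshotDeletion hunks add a check at the filesystem root alongside the existing checks on dir and sub, so quota usage is verified at every level of the tree after the delete operations. A sketch of what such a helper plausibly asserts; the real checkQuotaUsageComputation in the branch may work differently, for example by reading FSDirectory's cached quota counts rather than a fresh ContentSummary:

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative only: recompute usage via getContentSummary and compare it
// with expected namespace (inode count) and diskspace (consumed bytes) values.
class QuotaUsageCheck {
  static void checkQuotaUsage(DistributedFileSystem hdfs, Path dir,
      long expectedNamespace, long expectedDiskspace) throws Exception {
    ContentSummary cs = hdfs.getContentSummary(dir);
    long namespace = cs.getFileCount() + cs.getDirectoryCount();
    assertEquals("namespace mismatch for " + dir, expectedNamespace, namespace);
    assertEquals("diskspace mismatch for " + dir, expectedDiskspace,
        cs.getSpaceConsumed());
  }
}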