HDFS-5023. TestSnapshotPathINodes.testAllowSnapshot is failing with jdk7 (Mit Desai via jeagles)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1550266 13f79535-47bb-0310-9956-ffa450edef68
Author: Jonathan Turner Eagles
Date:   2013-12-11 21:24:41 +00:00
Parent: 05c923a455
Commit: 52125d151d
2 changed files with 18 additions and 6 deletions
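Background on the fix (my gloss, not part of the commit message): JDK 7's Class#getDeclaredMethods() returns methods in no guaranteed order, so JUnit 4 may run test methods in a different order than they are declared. The tests in TestSnapshotPathINodes left snapshots behind and deleted shared files, so under a reshuffled order a test could inherit leftover snapshots or missing files and fail. The patch makes each test self-contained: a new @Before reset() recreates file1 and file2, and every test deletes the snapshot it took and calls disallowSnapshot before returning. A minimal sketch of that pattern, assuming JUnit 4, with hypothetical names and an in-memory stand-in for the NameNode state:

import static org.junit.Assert.assertTrue;

import java.util.HashSet;
import java.util.Set;

import org.junit.Before;
import org.junit.Test;

public class OrderIndependentTest {
  // stand-ins for NameNode state that outlives a single test method
  private static final Set<String> files = new HashSet<String>();
  private static final Set<String> snapshots = new HashSet<String>();

  @Before
  public void reset() {
    // recreate the shared files before every test, whatever ran earlier
    // (mirrors the patch's reset() recreating file1 and file2)
    files.add("file1");
    files.add("file2");
  }

  @Test
  public void testDeletesFileUnderSnapshot() {
    snapshots.add("s1");              // take a snapshot
    files.remove("file1");            // mutate state another test relies on
    assertTrue(snapshots.contains("s1"));
    snapshots.remove("s1");           // clean up before returning, as the patch
                                      // does with deleteSnapshot + disallowSnapshot
  }

  @Test
  public void testAssumesCleanState() {
    // without reset() and the cleanup above, this fails whenever JDK 7
    // happens to run testDeletesFileUnderSnapshot first
    assertTrue(files.contains("file1"));
    assertTrue(snapshots.isEmpty());
  }
}

With per-test setup and cleanup in place, testAssumesCleanState passes whether it runs before or after testDeletesFileUnderSnapshot.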

--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -147,6 +147,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-5633. Improve OfflineImageViewer to use less memory. (jing9)
 
+    HDFS-5023. TestSnapshotPathINodes.testAllowSnapshot is failing with jdk7
+    (Mit Desai via jeagles)
+
   OPTIMIZATIONS
 
     HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)

--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java

@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.junit.AfterClass;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -58,7 +59,7 @@ public class TestSnapshotPathINodes {
   static private DistributedFileSystem hdfs;
 
   @BeforeClass
-  static public void setUp() throws Exception {
+  public static void setUp() throws Exception {
     conf = new Configuration();
     cluster = new MiniDFSCluster.Builder(conf)
       .numDataNodes(REPLICATION)
@@ -69,12 +70,16 @@ public class TestSnapshotPathINodes {
     fsdir = fsn.getFSDirectory();
 
     hdfs = cluster.getFileSystem();
+  }
+
+  @Before
+  public void reset() throws Exception {
     DFSTestUtil.createFile(hdfs, file1, 1024, REPLICATION, seed);
     DFSTestUtil.createFile(hdfs, file2, 1024, REPLICATION, seed);
   }
 
   @AfterClass
-  static public void tearDown() throws Exception {
+  public static void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
@@ -252,6 +257,8 @@ public class TestSnapshotPathINodes {
         System.out.println("The exception is expected: " + fnfe);
       }
     }
+    hdfs.deleteSnapshot(sub1, "s1");
+    hdfs.disallowSnapshot(sub1);
   }
 
   /**
@@ -309,6 +316,8 @@ public class TestSnapshotPathINodes {
         sub1.toString());
     assertEquals(inodes[components.length - 3].getFullPathName(),
         dir.toString());
+    hdfs.deleteSnapshot(sub1, "s2");
+    hdfs.disallowSnapshot(sub1);
   }
 
   static private Snapshot s4;
@@ -368,6 +377,8 @@ public class TestSnapshotPathINodes {
         sub1.toString());
     assertEquals(inodes[components.length - 3].getFullPathName(),
         dir.toString());
+    hdfs.deleteSnapshot(sub1, "s4");
+    hdfs.disallowSnapshot(sub1);
   }
 
   /**
@@ -376,9 +387,6 @@ public class TestSnapshotPathINodes {
    */
   @Test (timeout=15000)
   public void testSnapshotPathINodesAfterModification() throws Exception {
-    //file1 was deleted, create it again.
-    DFSTestUtil.createFile(hdfs, file1, 1024, REPLICATION, seed);
-
     // First check the INode for /TestSnapshot/sub1/file1
     String[] names = INode.getPathNames(file1.toString());
     byte[][] components = INode.getPathComponents(names);
@@ -386,7 +394,6 @@ public class TestSnapshotPathINodes {
     INode[] inodes = nodesInPath.getINodes();
     // The number of inodes should be equal to components.length
     assertEquals(inodes.length, components.length);
-    assertSnapshot(nodesInPath, false, s4, -1);
 
     // The last INode should be associated with file1
     assertEquals(inodes[components.length - 1].getFullPathName(),
@@ -434,5 +441,7 @@ public class TestSnapshotPathINodes {
     // The modification time of the INode for file3 should have been changed
     Assert.assertFalse(inodes[last].getModificationTime()
         == newInodes[last].getModificationTime());
+    hdfs.deleteSnapshot(sub1, "s3");
+    hdfs.disallowSnapshot(sub1);
   }
 }
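A note on the design choice (my reading, not stated in the commit): JUnit 4.11 added @FixMethodOrder and MethodSorters, which could have pinned a deterministic order instead, but pinning order only hides the coupling between tests; recreating state in @Before and cleaning up snapshots keeps each test valid under any ordering. The underlying JDK 7 behavior can be seen with a small probe (hypothetical class, illustration only) that prints the reflected method order, which the Javadoc leaves unspecified:

import java.lang.reflect.Method;

public class MethodOrderProbe {
  public static void main(String[] args) {
    // Class#getDeclaredMethods() documents no particular order; on JDK 7 it
    // often differs from declaration order, and JUnit 4 (pre-4.11) derived
    // its default test order directly from this array.
    for (Method m : MethodOrderProbe.class.getDeclaredMethods()) {
      System.out.println(m.getName());
    }
  }
}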