HDFS-3603. Decouple TestHDFSTrash from TestTrash. Contributed by Jason Lowe

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1358804 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins 2012-07-08 18:15:30 +00:00
parent d4fb882163
commit 6dcf42262d
3 changed files with 22 additions and 21 deletions

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java

@@ -89,7 +89,7 @@ public class TestTrash extends TestCase {
    * @param base - the base path where files are created
    * @throws IOException
    */
-  protected static void trashShell(final FileSystem fs, final Path base)
+  public static void trashShell(final FileSystem fs, final Path base)
       throws IOException {
     Configuration conf = new Configuration();
     conf.set("fs.defaultFS", fs.getUri().toString());

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -178,7 +178,7 @@ Trunk (unreleased changes)
     HDFS-3541. Deadlock between recovery, xceiver and packet responder
     (Vinay via umamahesh)
 
 Branch-2 ( Unreleased changes )
 
   INCOMPATIBLE CHANGES
 
     HDFS-3446. HostsFileReader silently ignores bad includes/excludes
@@ -439,6 +439,8 @@ Branch-2 ( Unreleased changes )
     so that V conforms to boolean compiling HttpFSServer.java with OpenJDK
     (adi2 via tucu)
 
+    HDFS-3603. Decouple TestHDFSTrash from TestTrash. (Jason Lowe via eli)
+
 Release 2.0.0-alpha - 05-23-2012
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java

@@ -19,46 +19,45 @@ package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
 
-import junit.extensions.TestSetup;
-import junit.framework.Test;
-import junit.framework.TestSuite;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.TestTrash;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
 
 /**
  * This class tests commands from Trash.
  */
-public class TestHDFSTrash extends TestTrash {
+public class TestHDFSTrash {
 
   private static MiniDFSCluster cluster = null;
-  public static Test suite() {
-    TestSetup setup = new TestSetup(new TestSuite(TestHDFSTrash.class)) {
-      protected void setUp() throws Exception {
-        Configuration conf = new HdfsConfiguration();
-        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-      }
-      protected void tearDown() throws Exception {
-        if (cluster != null) { cluster.shutdown(); }
-      }
-    };
-    return setup;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+  }
+
+  @AfterClass
+  public static void tearDown() {
+    if (cluster != null) { cluster.shutdown(); }
   }
 
   /**
    * Tests Trash on HDFS
    */
+  @Test
   public void testTrash() throws IOException {
-    trashShell(cluster.getFileSystem(), new Path("/"));
+    TestTrash.trashShell(cluster.getFileSystem(), new Path("/"));
   }
 
+  @Test
   public void testNonDefaultFS() throws IOException {
     FileSystem fs = cluster.getFileSystem();
     Configuration conf = fs.getConf();
     conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, fs.getUri().toString());
-    trashNonDefaultFS(conf);
+    TestTrash.trashNonDefaultFS(conf);
   }
 }
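The rewrite above is a standard JUnit 3 to JUnit 4 migration: the TestSetup suite wrapper and suite() factory give way to static @BeforeClass/@AfterClass lifecycle methods, and each test is marked @Test rather than being discovered through the JUnit 3 test* naming convention. A generic sketch of the pattern (class and fixture names here are hypothetical, standing in for MiniDFSCluster):

import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.assertTrue;

public class LifecycleSketch {

  // Hypothetical shared fixture standing in for an expensive resource
  // such as a MiniDFSCluster.
  static class ExpensiveResource {
    boolean open = true;
    void close() { open = false; }
  }

  private static ExpensiveResource resource;

  @BeforeClass  // runs once before any @Test method; replaces TestSetup.setUp()
  public static void startUp() {
    resource = new ExpensiveResource();
  }

  @AfterClass   // runs once after all @Test methods; replaces TestSetup.tearDown()
  public static void shutDown() {
    if (resource != null) { resource.close(); }
  }

  @Test         // the annotation replaces the JUnit 3 "testXxx" naming convention
  public void resourceIsAvailable() {
    assertTrue(resource.open);
  }
}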