HDFS-3603. Decouple TestHDFSTrash from TestTrash. Contributed by Jason Lowe

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1358804 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Eli Collins 2012-07-08 18:15:30 +00:00
parent d4fb882163
commit 6dcf42262d
3 changed files with 22 additions and 21 deletions

View File

@@ -89,7 +89,7 @@ public class TestTrash extends TestCase {
* @param base - the base path where files are created
* @throws IOException
*/
protected static void trashShell(final FileSystem fs, final Path base)
public static void trashShell(final FileSystem fs, final Path base)
throws IOException {
Configuration conf = new Configuration();
conf.set("fs.defaultFS", fs.getUri().toString());

View File

@@ -178,7 +178,7 @@ Trunk (unreleased changes)
HDFS-3541. Deadlock between recovery, xceiver and packet responder (Vinay via umamahesh)
Branch-2 ( Unreleased changes )
INCOMPATIBLE CHANGES
HDFS-3446. HostsFileReader silently ignores bad includes/excludes
@@ -439,6 +439,8 @@ Branch-2 ( Unreleased changes )
so that V conforms to boolean compiling HttpFSServer.java with OpenJDK
(adi2 via tucu)
HDFS-3603. Decouple TestHDFSTrash from TestTrash. (Jason Lowe via eli)
Release 2.0.0-alpha - 05-23-2012
INCOMPATIBLE CHANGES

View File

@@ -19,46 +19,45 @@ package org.apache.hadoop.hdfs;
import java.io.IOException;
import junit.extensions.TestSetup;
import junit.framework.Test;
import junit.framework.TestSuite;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.TestTrash;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* This class tests commands from Trash.
*/
public class TestHDFSTrash extends TestTrash {
public class TestHDFSTrash {
private static MiniDFSCluster cluster = null;
public static Test suite() {
TestSetup setup = new TestSetup(new TestSuite(TestHDFSTrash.class)) {
protected void setUp() throws Exception {
Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
}
protected void tearDown() throws Exception {
if (cluster != null) { cluster.shutdown(); }
}
};
return setup;
@BeforeClass
public static void setUp() throws Exception {
Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
}
@AfterClass
public static void tearDown() {
if (cluster != null) { cluster.shutdown(); }
}
/**
* Tests Trash on HDFS
*/
@Test
public void testTrash() throws IOException {
trashShell(cluster.getFileSystem(), new Path("/"));
TestTrash.trashShell(cluster.getFileSystem(), new Path("/"));
}
@Test
public void testNonDefaultFS() throws IOException {
FileSystem fs = cluster.getFileSystem();
Configuration conf = fs.getConf();
conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, fs.getUri().toString());
trashNonDefaultFS(conf);
TestTrash.trashNonDefaultFS(conf);
}
}