reverting r1459588. HDFS-4615.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1460785 13f79535-47bb-0310-9956-ffa450edef68
Siddharth Seth 2013-03-25 17:54:53 +00:00
parent c553e0ed7c
commit 3c2c6e4247
2 changed files with 45 additions and 64 deletions

@@ -106,9 +106,6 @@ Release 2.0.5-beta - UNRELEASED
     HDFS-4609. TestAuditLogs should release log handles between tests.
     (Ivan Mitic via szetszwo)
-    HDFS-4615. Fix TestDFSShell failures on Windows. (Arpit Agarwal
-    via szetszwo)
     HDFS-4584. Skip TestNNWithQJM.testNewNamenodeTakesOverWriter() on Windows.
     (Arpit Agarwal via szetszwo)

@@ -35,7 +35,6 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.Random;
 import java.util.Scanner;
-import java.util.concurrent.atomic.AtomicInteger;
 import java.util.zip.DeflaterOutputStream;
 import java.util.zip.GZIPOutputStream;
@@ -69,7 +68,6 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERV
  */
 public class TestDFSShell {
   private static final Log LOG = LogFactory.getLog(TestDFSShell.class);
-  private static AtomicInteger counter = new AtomicInteger();
   static final String TEST_ROOT_DIR =
     new Path(System.getProperty("test.build.data","/tmp"))
@@ -513,7 +511,7 @@ public class TestDFSShell {
     createLocalFile(furi);
     argv = new String[3];
     argv[0] = "-put";
-    argv[1] = furi.toURI().toString();
+    argv[1] = furi.toString();
     argv[2] = dstFs.getUri().toString() + "/furi";
     ret = ToolRunner.run(shell, argv);
     assertEquals(" put is working ", 0, ret);
@@ -890,9 +888,6 @@ public class TestDFSShell {
     assertEquals("rw-rw-rw-",
                  fs.getFileStatus(file).getPermission().toString());
-    // Skip "sticky bit" tests on Windows.
-    //
-    if (!Path.WINDOWS) {
     // test sticky bit on directories
     Path dir2 = new Path(dir, "stickybit" );
     fs.mkdirs(dir2 );
@@ -915,10 +910,6 @@ public class TestDFSShell {
       confirmPermissionChange("777", "rwxrwxrwt", fs, shell, dir2);
       fs.delete(dir2, true);
-    } else {
-      LOG.info("Skipped sticky bit tests on Windows");
-    }
     fs.delete(dir, true);
   } finally {
@@ -1580,29 +1571,27 @@ public class TestDFSShell {
   // force Copy Option is -f
   @Test
   public void testCopyCommandsWithForceOption() throws Exception {
-    final int SUCCESS = 0;
-    final int ERROR = 1;
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
       .format(true).build();
     FsShell shell = null;
     FileSystem fs = null;
     final File localFile = new File(TEST_ROOT_DIR, "testFileForPut");
-    final String localfilepath = new Path(localFile.getAbsolutePath()).toUri().toString();
-    final String testdir = "/tmp/TestDFSShell-testCopyCommandsWithForceOption-"
-        + counter.getAndIncrement();
+    final String localfilepath = localFile.getAbsolutePath();
+    final String testdir = TEST_ROOT_DIR + "/ForceTestDir";
     final Path hdfsTestDir = new Path(testdir);
     try {
       fs = cluster.getFileSystem();
       fs.mkdirs(hdfsTestDir);
       localFile.createNewFile();
-      writeFile(fs, new Path(testdir, "testFileForPut"));
+      writeFile(fs, new Path(TEST_ROOT_DIR, "testFileForPut"));
       shell = new FsShell();
       // Tests for put
       String[] argv = new String[] { "-put", "-f", localfilepath, testdir };
       int res = ToolRunner.run(shell, argv);
+      int SUCCESS = 0;
+      int ERROR = 1;
       assertEquals("put -f is not working", SUCCESS, res);
       argv = new String[] { "-put", localfilepath, testdir };
@@ -1674,13 +1663,8 @@ public class TestDFSShell {
     try {
       // Create and delete a file
       fs = cluster.getFileSystem();
-      // Use a separate tmp dir for each invocation.
-      final String testdir = "/tmp/TestDFSShell-deleteFileUsingTrash-" +
-          counter.getAndIncrement();
-      writeFile(fs, new Path(testdir, "foo"));
-      final String testFile = testdir + "/foo";
+      writeFile(fs, new Path(TEST_ROOT_DIR, "foo"));
+      final String testFile = TEST_ROOT_DIR + "/foo";
       final String trashFile = shell.getCurrentTrashDir() + "/" + testFile;
       String[] argv = new String[] { "-rm", testFile };
       int res = ToolRunner.run(shell, argv);
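
For context, a minimal, hypothetical sketch (not part of this commit) of the two idioms the revert removes: passing the local test file as a forward-slash file: URI rather than a platform-native path, and using an AtomicInteger counter to give each test invocation its own working directory. The class name RevertedIdiomsSketch, the use of java.io.tmpdir, and the substitution of java.io.File.toURI() for the Hadoop Path.toUri() call seen in the diff are all illustrative assumptions.

import java.io.File;
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative sketch only; not the committed test code.
public class RevertedIdiomsSketch {

  // HDFS-4615 added a shared counter so each invocation worked in a
  // distinct directory, e.g. /tmp/TestDFSShell-deleteFileUsingTrash-0, -1, ...
  private static final AtomicInteger counter = new AtomicInteger();

  public static void main(String[] args) {
    File localFile = new File(System.getProperty("java.io.tmpdir"), "testFileForPut");

    // Platform-native form: backslashes and a bare drive letter on Windows,
    // e.g. C:\temp\testFileForPut (the form the restored code passes to -put).
    String nativePath = localFile.getAbsolutePath();

    // URI form: a file: scheme with forward slashes on every platform,
    // e.g. file:/C:/temp/testFileForPut (the form the reverted code passed).
    String uriForm = localFile.toURI().toString();

    // Unique per-invocation test directory, as in the reverted test setup.
    String testdir = "/tmp/TestDFSShell-deleteFileUsingTrash-"
        + counter.getAndIncrement();

    System.out.println(nativePath);
    System.out.println(uriForm);
    System.out.println(testdir);
  }
}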