HDFS-5033. Bad error message for fs -put/copyFromLocal if user doesn't have permissions to read the source (Darrell Taylor via aw)

Allen Wittenauer 2015-05-27 16:51:34 -07:00
parent d6e3164d4a
commit bf500d9798
3 changed files with 268 additions and 190 deletions

FileUtil.java

@ -65,7 +65,7 @@ public class FileUtil {
/**
* Convert an array of FileStatus to an array of Path
*
* @param stats
* an array of FileStatus objects
* @return an array of paths corresponding to the input
@ -95,7 +95,7 @@ public class FileUtil {
else
return stat2Paths(stats);
}
/**
* Delete a directory and all its contents. If
* we return false, the directory may be partially-deleted.
@ -110,7 +110,7 @@ public class FileUtil {
public static boolean fullyDelete(final File dir) {
return fullyDelete(dir, false);
}
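For reference, a minimal usage sketch of the force-delete overload below (the scratch path is illustrative, not from this patch):

  import java.io.File;
  import org.apache.hadoop.fs.FileUtil;

  public class FullyDeleteDemo {
    public static void main(String[] args) {
      // Illustrative scratch directory; fullyDelete(dir, true) first grants
      // +rwx so unreadable children do not abort the delete.
      File scratch = new File("/tmp/scratch");
      if (!FileUtil.fullyDelete(scratch, true)) {
        // Per the javadoc, a false return means the directory may be
        // only partially deleted.
        System.err.println(scratch + " may be partially deleted");
      }
    }
  }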
/**
* Delete a directory and all its contents. If
* we return false, the directory may be partially-deleted.
@ -127,7 +127,7 @@ public class FileUtil {
*/
public static boolean fullyDelete(final File dir, boolean tryGrantPermissions) {
if (tryGrantPermissions) {
// try to chmod +rwx the parent folder of the 'dir':
File parent = dir.getParentFile();
grantPermissions(parent);
}
@ -189,7 +189,7 @@ public class FileUtil {
}
return !ex;
}
/**
* Delete the contents of a directory, not the directory itself. If
* we return false, the directory may be partially-deleted.
@ -199,19 +199,19 @@ public class FileUtil {
public static boolean fullyDeleteContents(final File dir) {
return fullyDeleteContents(dir, false);
}
/**
* Delete the contents of a directory, not the directory itself. If
* we return false, the directory may be partially-deleted.
* If dir is a symlink to a directory, all the contents of the actual
* directory pointed to by dir will be deleted.
* @param tryGrantPermissions if 'true', try to grant +rwx permissions to this
* and all the underlying directories before trying to delete their contents.
*/
public static boolean fullyDeleteContents(final File dir, final boolean tryGrantPermissions) {
if (tryGrantPermissions) {
// to be able to list the dir and delete files from it
// we must grant the dir rwx permissions:
grantPermissions(dir);
}
boolean deletionSucceeded = true;
@ -246,14 +246,14 @@ public class FileUtil {
/**
* Recursively delete a directory.
*
* @param fs {@link FileSystem} on which the path is present
* @param dir directory to recursively delete
* @throws IOException
* @deprecated Use {@link FileSystem#delete(Path, boolean)}
*/
@Deprecated
public static void fullyDelete(FileSystem fs, Path dir)
throws IOException {
fs.delete(dir, true);
}
@ -262,9 +262,9 @@ public class FileUtil {
// If the destination is a subdirectory of the source, then
// generate exception
//
private static void checkDependencies(FileSystem srcFS,
Path src,
FileSystem dstFS,
Path dst)
throws IOException {
if (srcFS == dstFS) {
@ -282,16 +282,16 @@ public class FileUtil {
}
/** Copy files between FileSystems. */
public static boolean copy(FileSystem srcFS, Path src,
FileSystem dstFS, Path dst,
boolean deleteSource,
Configuration conf) throws IOException {
return copy(srcFS, src, dstFS, dst, deleteSource, true, conf);
}
public static boolean copy(FileSystem srcFS, Path[] srcs,
FileSystem dstFS, Path dst,
boolean deleteSource,
boolean overwrite, Configuration conf)
throws IOException {
boolean gotException = false;
@ -307,7 +307,7 @@ public class FileUtil {
"does not exist");
} else {
FileStatus sdst = dstFS.getFileStatus(dst);
if (!sdst.isDirectory())
throw new IOException("copying multiple files, but last argument `" +
dst + "' is not a directory");
}
@ -329,8 +329,8 @@ public class FileUtil {
}
/** Copy files between FileSystems. */
public static boolean copy(FileSystem srcFS, Path src,
FileSystem dstFS, Path dst,
boolean deleteSource,
boolean overwrite,
Configuration conf) throws IOException {
@ -375,21 +375,21 @@ public class FileUtil {
} else {
return true;
}
}
/** Copy all files in a directory to one output file (merge). */
public static boolean copyMerge(FileSystem srcFS, Path srcDir,
FileSystem dstFS, Path dstFile,
boolean deleteSource,
Configuration conf, String addString) throws IOException {
dstFile = checkDest(srcDir.getName(), dstFS, dstFile, false);
if (!srcFS.getFileStatus(srcDir).isDirectory())
return false;
OutputStream out = dstFS.create(dstFile);
try {
FileStatus contents[] = srcFS.listStatus(srcDir);
Arrays.sort(contents);
@ -400,24 +400,24 @@ public class FileUtil {
IOUtils.copyBytes(in, out, conf, false);
if (addString!=null)
out.write(addString.getBytes("UTF-8"));
} finally {
in.close();
}
}
}
} finally {
out.close();
}
if (deleteSource) {
return srcFS.delete(srcDir, true);
} else {
return true;
}
}
/** Copy local files to a FileSystem. */
public static boolean copy(File src,
FileSystem dstFS, Path dst,
@ -446,8 +446,12 @@ public class FileUtil {
IOUtils.closeStream( in );
throw e;
}
} else if (!src.canRead()) {
throw new IOException(src.toString() +
": Permission denied");
} else {
throw new IOException(src.toString() +
": No such file or directory");
}
if (deleteSource) {
@ -458,7 +462,7 @@ public class FileUtil {
}
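The else-if branch above is the heart of HDFS-5033: an existing-but-unreadable source now reports "Permission denied" instead of the misleading "No such file or directory". A standalone sketch of that ordering (checkSource is a hypothetical helper, not the committed method):

  import java.io.File;
  import java.io.IOException;

  class CopySourceCheck {
    // Report the most specific failure first: an existing but unreadable
    // source is a permission problem, not a missing file.
    static void checkSource(File src) throws IOException {
      if (src.exists() && !src.canRead()) {
        throw new IOException(src + ": Permission denied");
      }
      if (!src.exists()) {
        throw new IOException(src + ": No such file or directory");
      }
      // Otherwise the copy can proceed.
    }

    public static void main(String[] args) throws IOException {
      checkSource(new File(args[0]));
    }
  }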
/** Copy FileSystem files to local files. */
public static boolean copy(FileSystem srcFS, Path src,
File dst, boolean deleteSource,
Configuration conf) throws IOException {
FileStatus filestatus = srcFS.getFileStatus(src);
@ -516,7 +520,7 @@ public class FileUtil {
public static String makeShellPath(String filename) throws IOException {
return filename;
}
/**
* Convert an os-native filename to a path that works for the shell.
* @param file The filename to convert
@ -530,12 +534,12 @@ public class FileUtil {
/**
* Convert an os-native filename to a path that works for the shell.
* @param file The filename to convert
* @param makeCanonicalPath
* Whether to make canonical path for the file passed
* @return The unix pathname
* @throws IOException on windows, there can be problems with the subprocess
*/
public static String makeShellPath(File file, boolean makeCanonicalPath)
throws IOException {
if (makeCanonicalPath) {
return makeShellPath(file.getCanonicalPath());
@ -547,7 +551,7 @@ public class FileUtil {
/**
* Takes an input dir and returns the disk usage of that local directory. Very basic
* implementation.
*
* @param dir
* The input dir to get the disk space of this local dir
* @return The total disk space of the input local directory
@ -576,7 +580,7 @@ public class FileUtil {
return size;
}
}
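The method above is the usual recursive size sum; a hedged sketch of the same idea (ignoring edge cases such as symlinks that the real implementation may treat differently):

  import java.io.File;

  class DiskUsageSketch {
    // Sum the lengths of all regular files beneath dir, recursing into
    // subdirectories; java.io.File#listFiles() may return null on error.
    static long getDU(File dir) {
      if (!dir.isDirectory()) {
        return dir.length();
      }
      long size = 0;
      File[] children = dir.listFiles();
      if (children != null) {
        for (File child : children) {
          size += getDU(child);
        }
      }
      return size;
    }

    public static void main(String[] args) {
      System.out.println(getDU(new File(args[0])) + "\t" + args[0]);
    }
  }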
/**
* Given a File input it will unzip the file in the unzip directory
* passed as the second parameter
@ -596,9 +600,9 @@ public class FileUtil {
InputStream in = zipFile.getInputStream(entry);
try {
File file = new File(unzipDir, entry.getName());
if (!file.getParentFile().mkdirs()) {
if (!file.getParentFile().isDirectory()) {
throw new IOException("Mkdirs failed to create " +
throw new IOException("Mkdirs failed to create " +
file.getParentFile().toString());
}
}
@ -625,10 +629,10 @@ public class FileUtil {
/**
* Given a Tar File as input it will untar the file in the untar directory
* passed as the second parameter
*
* This utility will untar ".tar" files and ".tar.gz","tgz" files.
*
* @param inFile The tar file as input.
* @param untarDir The untar directory where to untar the tar file.
* @throws IOException
*/
@ -641,17 +645,17 @@ public class FileUtil {
boolean gzipped = inFile.toString().endsWith("gz");
if(Shell.WINDOWS) {
// Tar is not native to Windows. Use simple Java based implementation for
// tests and simple tar archives
unTarUsingJava(inFile, untarDir, gzipped);
}
else {
// spawn tar utility to untar archive for full fledged unix behavior such
// as resolving symlinks in tar archives
unTarUsingTar(inFile, untarDir, gzipped);
}
}
private static void unTarUsingTar(File inFile, File untarDir,
boolean gzipped) throws IOException {
StringBuffer untarCommand = new StringBuffer();
@ -659,9 +663,9 @@ public class FileUtil {
untarCommand.append(" gzip -dc '");
untarCommand.append(FileUtil.makeShellPath(inFile));
untarCommand.append("' | (");
}
untarCommand.append("cd '");
untarCommand.append(FileUtil.makeShellPath(untarDir));
untarCommand.append("' ; ");
untarCommand.append("tar -xf ");
@ -675,11 +679,11 @@ public class FileUtil {
shexec.execute();
int exitcode = shexec.getExitCode();
if (exitcode != 0) {
throw new IOException("Error untarring file " + inFile +
throw new IOException("Error untarring file " + inFile +
". Tar process exited with exit code " + exitcode);
}
}
private static void unTarUsingJava(File inFile, File untarDir,
boolean gzipped) throws IOException {
InputStream inputStream = null;
@ -702,7 +706,7 @@ public class FileUtil {
IOUtils.cleanup(LOG, tis, inputStream);
}
}
private static void unpackEntries(TarArchiveInputStream tis,
TarArchiveEntry entry, File outputDir) throws IOException {
if (entry.isDirectory()) {
@ -739,14 +743,14 @@ public class FileUtil {
outputStream.flush();
outputStream.close();
}
/**
* Class for creating hardlinks.
* Supports Unix and Windows XP.
* @deprecated Use {@link org.apache.hadoop.fs.HardLink}
*/
@Deprecated
public static class HardLink extends org.apache.hadoop.fs.HardLink {
// This is a stub to assist with coordinated change between
// COMMON and HDFS projects. It will be removed after the
// corresponding change is committed to HDFS.
@ -759,7 +763,7 @@ public class FileUtil {
* setting, we will log a warning. The return code in this
* case is 2.
*
* @param target the target for symlink
* @param linkname the symlink
* @return 0 on success
*/
@ -873,7 +877,7 @@ public class FileUtil {
shExec.execute();
}catch(IOException e) {
if(LOG.isDebugEnabled()) {
LOG.debug("Error while changing permission : " + filename
LOG.debug("Error while changing permission : " + filename
+" Exception: " + StringUtils.stringifyException(e));
}
}
@ -1041,9 +1045,9 @@ public class FileUtil {
execSetPermission(f, permission);
return;
}
boolean rv = true;
// read perms
rv = f.setReadable(group.implies(FsAction.READ), false);
checkReturnValue(rv, f, permission);
@ -1069,17 +1073,17 @@ public class FileUtil {
}
}
private static void checkReturnValue(boolean rv, File p,
FsPermission permission
) throws IOException {
if (!rv) {
throw new IOException("Failed to set permissions of path: " + p +
" to " +
throw new IOException("Failed to set permissions of path: " + p +
" to " +
String.format("%04o", permission.toShort()));
}
}
private static void execSetPermission(File f,
FsPermission permission
) throws IOException {
if (NativeIO.isAvailable()) {
@ -1089,7 +1093,7 @@ public class FileUtil {
String.format("%04o", permission.toShort()), false));
}
}
static String execCommand(File f, String... cmd) throws IOException {
String[] args = new String[cmd.length + 1];
System.arraycopy(cmd, 0, args, 0, cmd.length);
@ -1147,12 +1151,12 @@ public class FileUtil {
}
}
}
/**
* A wrapper for {@link File#listFiles()}. This java.io API returns null
* when a dir is not a directory or for any I/O error. Instead of having
* a null check everywhere File#listFiles() is used, this utility API
* gets around the problem for the majority of cases, where we prefer
* an IOException to be thrown.
* @param dir directory for which listing should be performed
* @return list of files or empty list
@ -1165,13 +1169,13 @@ public class FileUtil {
+ dir.toString());
}
return files;
}
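Pieced together, the wrapper reads as below (a reconstruction from the fragments above, not a verbatim quote of the method):

  import java.io.File;
  import java.io.IOException;

  class ListFilesWrapper {
    // java.io.File#listFiles() returns null both for a non-directory and
    // for an I/O error; the wrapper turns that null into an IOException.
    public static File[] listFiles(File dir) throws IOException {
      File[] files = dir.listFiles();
      if (files == null) {
        throw new IOException("Invalid directory or I/O error occurred for dir: "
            + dir.toString());
      }
      return files;
    }
  }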
/**
* A wrapper for {@link File#list()}. This java.io API returns null
* when a dir is not a directory or for any I/O error. Instead of having
* a null check everywhere File#list() is used, this utility API
* gets around the problem for the majority of cases, where we prefer
* an IOException to be thrown.
* @param dir directory for which listing should be performed
* @return list of file names or empty string list
@ -1184,35 +1188,35 @@ public class FileUtil {
+ dir.toString());
}
return fileNames;
}
public static String[] createJarWithClassPath(String inputClassPath, Path pwd,
Map<String, String> callerEnv) throws IOException {
return createJarWithClassPath(inputClassPath, pwd, pwd, callerEnv);
}
/**
* Create a jar file at the given path, containing a manifest with a classpath
* that references all specified entries.
*
* Some platforms may have an upper limit on command line length. For example,
* the maximum command line length on Windows is 8191 characters, but the
* length of the classpath may exceed this. To work around this limitation,
* use this method to create a small intermediate jar with a manifest that
* contains the full classpath. It returns the absolute path to the new jar,
* which the caller may set as the classpath for a new process.
*
* Environment variable evaluation is not supported within a jar manifest, so
* this method expands environment variables before inserting classpath entries
* to the manifest. The method parses environment variables according to
* platform-specific syntax (%VAR% on Windows, or $VAR otherwise). On Windows,
* environment variables are case-insensitive. For example, %VAR% and %var%
* evaluate to the same value.
*
* Specifying the classpath in a jar manifest does not support wildcards, so
* this method expands wildcards internally. Any classpath entry that ends
* with * is translated to all files at that path with extension .jar or .JAR.
*
* @param inputClassPath String input classpath to bundle into the jar manifest
* @param pwd Path to working directory to save jar
* @param targetDir path to where the jar execution will have its working dir
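To make the wildcard rule described above concrete, a hedged sketch of expanding a single classpath entry (expandWildcardEntry is a hypothetical helper, not part of FileUtil):

  import java.io.File;
  import java.util.ArrayList;
  import java.util.List;

  class ClasspathWildcardSketch {
    // "dir/*" expands to every file in dir whose name ends in .jar or
    // .JAR; any other entry passes through unchanged.
    static List<String> expandWildcardEntry(String entry) {
      List<String> expanded = new ArrayList<>();
      if (!entry.endsWith("*")) {
        expanded.add(entry);
        return expanded;
      }
      File dir = new File(entry.substring(0, entry.length() - 1));
      File[] children = dir.listFiles();
      if (children != null) {
        for (File child : children) {
          String name = child.getName();
          if (name.endsWith(".jar") || name.endsWith(".JAR")) {
            expanded.add(child.getAbsolutePath());
          }
        }
      }
      return expanded;
    }
  }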

CHANGES.txt

@ -160,6 +160,9 @@ Trunk (Unreleased)
HDFS-6353. Check and make checkpoint before stopping the NameNode. (jing9)
HDFS-5033. Bad error message for fs -put/copyFromLocal if user
doesn't have permissions to read the source (Darrell Taylor via aw)
OPTIMIZATIONS
BUG FIXES

TestDFSShell.java

@ -155,13 +155,13 @@ public class TestDFSShell {
assertTrue(f1.exists());
assertTrue(f1.isFile());
assertEquals(0L, f1.length());
//copy to remote
final Path root = mkdir(dfs, new Path("/test/zeroSizeFile"));
final Path remotef = new Path(root, "dst");
show("copy local " + f1 + " to remote " + remotef);
dfs.copyFromLocalFile(false, false, new Path(f1.getPath()), remotef);
//getBlockSize() should not throw exception
show("Block size = " + dfs.getFileStatus(remotef).getBlockSize());
@ -172,7 +172,7 @@ public class TestDFSShell {
assertTrue(f2.exists());
assertTrue(f2.isFile());
assertEquals(0L, f2.length());
f1.delete();
f2.delete();
} finally {
@ -180,13 +180,13 @@ public class TestDFSShell {
cluster.shutdown();
}
}
@Test (timeout = 30000)
public void testRecursiveRm() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs = cluster.getFileSystem();
assertTrue("Not a HDFS: " + fs.getUri(),
assertTrue("Not a HDFS: " + fs.getUri(),
fs instanceof DistributedFileSystem);
try {
fs.mkdirs(new Path(new Path("parent"), "child"));
@ -201,12 +201,12 @@ public class TestDFSShell {
} catch(IOException e) {
assert(false);
}
} finally {
try { fs.close();}catch(IOException e){};
cluster.shutdown();
}
}
@Test (timeout = 30000)
public void testDu() throws IOException {
int replication = 2;
@ -220,7 +220,7 @@ public class TestDFSShell {
System.setOut(psOut);
FsShell shell = new FsShell();
shell.setConf(conf);
try {
Path myPath = new Path("/test/dir");
assertTrue(fs.mkdirs(myPath));
@ -235,7 +235,7 @@ public class TestDFSShell {
Long myFileDiskUsed = myFileLength * replication;
Long myFile2Length = fs.getFileStatus(myFile2).getLen();
Long myFile2DiskUsed = myFile2Length * replication;
String[] args = new String[2];
args[0] = "-du";
args[1] = "/test/dir";
@ -254,7 +254,7 @@ public class TestDFSShell {
assertThat(returnString, containsString(myFileDiskUsed.toString()));
assertThat(returnString, containsString(myFile2Length.toString()));
assertThat(returnString, containsString(myFile2DiskUsed.toString()));
// Check that -du -s reports the state of the snapshot
String snapshotName = "ss1";
Path snapshotPath = new Path(myPath, ".snapshot/" + snapshotName);
@ -306,7 +306,7 @@ public class TestDFSShell {
System.setOut(psBackup);
cluster.shutdown();
}
}
@Test (timeout = 30000)
@ -321,15 +321,15 @@ public class TestDFSShell {
try {
// remove left over crc files:
new File(TEST_ROOT_DIR, ".f1.crc").delete();
new File(TEST_ROOT_DIR, ".f2.crc").delete();
final File f1 = createLocalFile(new File(TEST_ROOT_DIR, "f1"));
final File f2 = createLocalFile(new File(TEST_ROOT_DIR, "f2"));
final Path root = mkdir(dfs, new Path("/test/put"));
final Path dst = new Path(root, "dst");
show("begin");
final Thread copy2ndFileThread = new Thread() {
@Override
public void run() {
@ -344,13 +344,13 @@ public class TestDFSShell {
assertTrue(false);
}
};
//use SecurityManager to pause the copying of f1 and begin copying f2
SecurityManager sm = System.getSecurityManager();
System.out.println("SecurityManager = " + sm);
System.setSecurityManager(new SecurityManager() {
private boolean firstTime = true;
@Override
public void checkPermission(Permission perm) {
if (firstTime) {
@ -359,7 +359,7 @@ public class TestDFSShell {
String s = "" + Arrays.asList(t.getStackTrace());
if (s.contains("FileUtil.copyContent")) {
//pause at FileUtil.copyContent
firstTime = false;
copy2ndFileThread.start();
try {Thread.sleep(5000);} catch (InterruptedException e) {}
@ -371,7 +371,7 @@ public class TestDFSShell {
show("copy local " + f1 + " to remote " + dst);
dfs.copyFromLocalFile(false, false, new Path(f1.getPath()), dst);
show("done");
try {copy2ndFileThread.join();} catch (InterruptedException e) { }
System.setSecurityManager(sm);
@ -381,8 +381,8 @@ public class TestDFSShell {
srcs[0] = new Path(f1.getPath());
srcs[1] = new Path(f2.getPath());
dfs.copyFromLocalFile(false, false, srcs, destmultiple);
srcs[0] = new Path(destmultiple,"f1");
srcs[1] = new Path(destmultiple,"f2");
assertTrue(dfs.exists(srcs[0]));
assertTrue(dfs.exists(srcs[1]));
@ -492,7 +492,7 @@ public class TestDFSShell {
ret = ToolRunner.run(shell, argv);
returned = out.toString();
assertEquals(" -mkdir returned 1 ", 1, ret);
assertTrue(" -mkdir returned File exists",
assertTrue(" -mkdir returned File exists",
(returned.lastIndexOf("File exists") != -1));
Path testFile = new Path("/testfile");
OutputStream outtmp = srcFs.create(testFile);
@ -520,7 +520,7 @@ public class TestDFSShell {
argv[2] = "/testfiletest";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
assertTrue("no output from rename",
assertTrue("no output from rename",
(returned.lastIndexOf("Renamed") == -1));
out.reset();
argv[0] = "-mv";
@ -557,7 +557,7 @@ public class TestDFSShell {
}
}
}
@Test (timeout = 30000)
public void testURIPaths() throws Exception {
Configuration srcConf = new HdfsConfiguration();
@ -580,14 +580,14 @@ public class TestDFSShell {
argv[1] = dstFs.getUri().toString() + "/";
int ret = ToolRunner.run(shell, argv);
assertEquals("ls works on remote uri ", 0, ret);
//check for rm -r
dstFs.mkdirs(new Path("/hadoopdir"));
argv = new String[2];
argv[0] = "-rmr";
argv[1] = dstFs.getUri().toString() + "/hadoopdir";
ret = ToolRunner.run(shell, argv);
assertEquals("-rmr works on remote uri " + argv[1], 0, ret);
//check du
argv[0] = "-du";
argv[1] = dstFs.getUri().toString() + "/";
ret = ToolRunner.run(shell, argv);
@ -601,14 +601,14 @@ public class TestDFSShell {
argv[2] = dstFs.getUri().toString() + "/furi";
ret = ToolRunner.run(shell, argv);
assertEquals(" put is working ", 0, ret);
//check cp
argv[0] = "-cp";
argv[1] = dstFs.getUri().toString() + "/furi";
argv[2] = srcFs.getUri().toString() + "/furi";
ret = ToolRunner.run(shell, argv);
assertEquals(" cp is working ", 0, ret);
assertTrue(srcFs.exists(new Path("/furi")));
//check cat
argv = new String[2];
argv[0] = "-cat";
argv[1] = dstFs.getUri().toString() + "/furi";
@ -626,7 +626,7 @@ public class TestDFSShell {
confirmOwner(null, "herbivores", dstFs, parent, path);
runCmd(shell, "-chown", "-R", ":reptiles", dstFs.getUri().toString() + "/");
confirmOwner(null, "reptiles", dstFs, root, parent, path);
//check if default hdfs:/// works
argv[0] = "-cat";
argv[1] = "hdfs:///furi";
ret = ToolRunner.run(shell, argv);
@ -757,7 +757,7 @@ public class TestDFSShell {
assertTrue("Output doesn't match input",
Arrays.equals(writebytes, out.toByteArray()));
out.reset();
// Test a plain text.
OutputStream pout = fs.create(new Path(root, "file.txt"));
writebytes = "bar".getBytes();
@ -805,8 +805,8 @@ public class TestDFSShell {
}
File localroot = new File(TEST_ROOT_DIR, "copyToLocal");
File localroot2 = new File(TEST_ROOT_DIR, "copyToLocal2");
File f1 = new File(localroot, "f1");
assertTrue("Copying failed.", f1.isFile());
@ -821,9 +821,9 @@ public class TestDFSShell {
File f4 = new File(sub, "f4");
assertTrue("Copying failed.", f4.isFile());
File f5 = new File(localroot2, "f1");
assertTrue("Copying failed.", f5.isFile());
assertTrue("Copying failed.", f5.isFile());
f1.delete();
f2.delete();
@ -836,12 +836,12 @@ public class TestDFSShell {
// destination files
{
String[] args = {"-copyToLocal", "nosuchfile", TEST_ROOT_DIR};
try {
assertEquals(1, shell.run(args));
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
}
File f6 = new File(TEST_ROOT_DIR, "nosuchfile");
assertTrue(!f6.exists());
}
@ -867,7 +867,7 @@ public class TestDFSShell {
String path = "/test/" + name;
Path root = mkdir(fs, new Path(path));
Path sub = mkdir(fs, new Path(root, "sub"));
Path root2 = mkdir(fs, new Path(path + "2"));
writeFile(fs, new Path(root, "f1"));
writeFile(fs, new Path(root, "f2"));
@ -900,7 +900,7 @@ public class TestDFSShell {
localpath = localpath.makeQualified(localfs.getUri(),
localfs.getWorkingDirectory());
localfs.mkdirs(localpath);
final String localstr = localpath.toString();
System.out.println("localstr=" + localstr);
runCount(localstr, 1, 0, shell);
@ -915,7 +915,7 @@ public class TestDFSShell {
}
private static void runCount(String path, long dirs, long files, FsShell shell
) throws IOException {
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
PrintStream out = new PrintStream(bytes);
PrintStream oldOut = System.out;
System.setOut(out);
@ -956,15 +956,15 @@ public class TestDFSShell {
throw new IOException(StringUtils.stringifyException(e));
}
}
/**
* Test chmod.
*/
void testChmod(Configuration conf, FileSystem fs, String chmodDir)
throws IOException {
FsShell shell = new FsShell();
shell.setConf(conf);
try {
//first make dir
Path dir = new Path(chmodDir);
@ -1039,8 +1039,8 @@ public class TestDFSShell {
LOG.info("Permission change result: " + result);
assertEquals(expected, result);
}
private void confirmOwner(String owner, String group,
FileSystem fs, Path... paths) throws IOException {
for(Path path : paths) {
if (owner != null) {
@ -1051,68 +1051,68 @@ public class TestDFSShell {
}
}
}
@Test (timeout = 30000)
public void testFilePermissions() throws IOException {
Configuration conf = new HdfsConfiguration();
//test chmod on local fs
FileSystem fs = FileSystem.getLocal(conf);
testChmod(conf, fs,
(new File(TEST_ROOT_DIR, "chmodTest")).getAbsolutePath());
conf.set(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, "true");
//test chmod on DFS
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
fs = cluster.getFileSystem();
testChmod(conf, fs, "/tmp/chmodTest");
// test chown and chgrp on DFS:
FsShell shell = new FsShell();
shell.setConf(conf);
fs = cluster.getFileSystem();
/* For dfs, I am the super user and I can change owner of any file to
* anything. "-R" option is already tested by chmod test above.
*/
String file = "/tmp/chownTest";
Path path = new Path(file);
Path parent = new Path("/tmp");
Path root = new Path("/");
TestDFSShell.writeFile(fs, path);
runCmd(shell, "-chgrp", "-R", "herbivores", "/*", "unknownFile*");
confirmOwner(null, "herbivores", fs, parent, path);
runCmd(shell, "-chgrp", "mammals", file);
confirmOwner(null, "mammals", fs, path);
runCmd(shell, "-chown", "-R", ":reptiles", "/");
confirmOwner(null, "reptiles", fs, root, parent, path);
runCmd(shell, "-chown", "python:", "/nonExistentFile", file);
confirmOwner("python", "reptiles", fs, path);
runCmd(shell, "-chown", "-R", "hadoop:toys", "unknownFile", "/");
confirmOwner("hadoop", "toys", fs, root, parent, path);
// Test different characters in names
runCmd(shell, "-chown", "hdfs.user", file);
confirmOwner("hdfs.user", null, fs, path);
runCmd(shell, "-chown", "_Hdfs.User-10:_hadoop.users--", file);
confirmOwner("_Hdfs.User-10", "_hadoop.users--", fs, path);
runCmd(shell, "-chown", "hdfs/hadoop-core@apache.org:asf-projects", file);
confirmOwner("hdfs/hadoop-core@apache.org", "asf-projects", fs, path);
runCmd(shell, "-chgrp", "hadoop-core@apache.org/100", file);
confirmOwner(null, "hadoop-core@apache.org/100", fs, path);
cluster.shutdown();
}
/**
@ -1142,7 +1142,7 @@ public class TestDFSShell {
Path myFile = new Path("/test/mkdirs/myFile");
writeFile(fileSys, myFile);
assertTrue(fileSys.exists(myFile));
Path myFile2 = new Path("/test/mkdirs/myFile2");
writeFile(fileSys, myFile2);
assertTrue(fileSys.exists(myFile2));
@ -1156,7 +1156,7 @@ public class TestDFSShell {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertTrue(val == 0);
assertFalse(fileSys.exists(myFile));
@ -1174,13 +1174,13 @@ public class TestDFSShell {
String[] args = new String[3];
args[0] = "-cat";
args[1] = "/test/mkdirs/myFile";
args[2] = "/test/mkdirs/myFile2";
args[2] = "/test/mkdirs/myFile2";
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run: " +
StringUtils.stringifyException(e));
}
assertTrue(val == 0);
}
@ -1196,7 +1196,7 @@ public class TestDFSShell {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertTrue(val != 0);
}
@ -1211,7 +1211,7 @@ public class TestDFSShell {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertTrue(val != 0);
}
@ -1226,7 +1226,7 @@ public class TestDFSShell {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertTrue(val == 0);
}
@ -1480,7 +1480,7 @@ public class TestDFSShell {
for(Block b : e.getValue()) {
files.add(DataNodeTestUtils.getFile(dn, poolId, b.getBlockId()));
}
}
}
return files;
}
@ -1493,7 +1493,7 @@ public class TestDFSShell {
PrintWriter out = new PrintWriter(f);
out.print(content);
out.flush();
out.close();
}
}
@ -1503,7 +1503,7 @@ public class TestDFSShell {
@Test (timeout = 30000)
public void testRemoteException() throws Exception {
UserGroupInformation tmpUGI =
UserGroupInformation.createUserForTesting("tmpname", new String[] {"mygroup"});
MiniDFSCluster dfs = null;
PrintStream bak = null;
@ -1515,7 +1515,7 @@ public class TestDFSShell {
fs.mkdirs(p);
fs.setPermission(p, new FsPermission((short)0700));
bak = System.err;
tmpUGI.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
@ -1529,9 +1529,9 @@ public class TestDFSShell {
int ret = ToolRunner.run(fshell, args);
assertEquals("returned should be 1", 1, ret);
String str = out.toString();
assertTrue("permission denied printed",
assertTrue("permission denied printed",
str.indexOf("Permission denied") != -1);
out.reset();
return null;
}
});
@ -1544,7 +1544,7 @@ public class TestDFSShell {
}
}
}
@Test (timeout = 30000)
public void testGet() throws IOException {
GenericTestUtils.setLogLevel(FSInputChecker.LOG, Level.ALL);
@ -1563,7 +1563,7 @@ public class TestDFSShell {
String dst = new File(TEST_ROOT_DIR, fname + ++count)
.getAbsolutePath();
String[] args = new String[options.length + 3];
args[0] = "-get";
args[0] = "-get";
args[args.length - 2] = remotef.toString();
args[args.length - 1] = dst;
for(int i = 0; i < options.length; i++) {
@ -1574,9 +1574,9 @@ public class TestDFSShell {
try {
assertEquals(exitcode, shell.run(args));
} catch (Exception e) {
assertTrue(StringUtils.stringifyException(e), false);
}
return exitcode == 0? DFSTestUtil.readFile(new File(dst)): null;
}
};
@ -1645,9 +1645,9 @@ public class TestDFSShell {
try {
final String root = createTree(dfs, "lsr");
dfs.mkdirs(new Path(root, "zzz"));
runLsr(new FsShell(conf), root, 0);
final Path sub = new Path(root, "sub");
dfs.setPermission(sub, new FsPermission((short)0));
@ -1670,7 +1670,7 @@ public class TestDFSShell {
private static String runLsr(final FsShell shell, String root, int returnvalue
) throws Exception {
System.out.println("root=" + root + ", returnvalue=" + returnvalue);
final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
final PrintStream out = new PrintStream(bytes);
final PrintStream oldOut = System.out;
final PrintStream oldErr = System.err;
@ -1688,7 +1688,7 @@ public class TestDFSShell {
System.out.println("results:\n" + results);
return results;
}
/**
* default setting is file:// which is not a DFS
* so DFSAdmin should throw and catch InvalidArgumentException
@ -1703,7 +1703,7 @@ public class TestDFSShell {
int res = admin.run(new String[] {"-refreshNodes"});
assertEquals("expected to fail -1", res , -1);
}
// Preserve Copy Option is -ptopxa (timestamps, ownership, permission, XATTR,
// ACLs)
@Test (timeout = 120000)
@ -1737,15 +1737,15 @@ public class TestDFSShell {
final String owner = status.getOwner();
final String group = status.getGroup();
final FsPermission perm = status.getPermission();
fs.setXAttr(src, USER_A1, USER_A1_VALUE);
fs.setXAttr(src, TRUSTED_A1, TRUSTED_A1_VALUE);
shell = new FsShell(conf);
// -p
Path target1 = new Path(hdfsTestDir, "targetfile1");
String[] argv = new String[] { "-cp", "-p", src.toUri().toString(),
String[] argv = new String[] { "-cp", "-p", src.toUri().toString(),
target1.toUri().toString() };
int ret = ToolRunner.run(shell, argv);
assertEquals("cp -p is not working", SUCCESS, ret);
@ -1764,7 +1764,7 @@ public class TestDFSShell {
// -ptop
Path target2 = new Path(hdfsTestDir, "targetfile2");
argv = new String[] { "-cp", "-ptop", src.toUri().toString(),
argv = new String[] { "-cp", "-ptop", src.toUri().toString(),
target2.toUri().toString() };
ret = ToolRunner.run(shell, argv);
assertEquals("cp -ptop is not working", SUCCESS, ret);
@ -1783,7 +1783,7 @@ public class TestDFSShell {
// -ptopx
Path target3 = new Path(hdfsTestDir, "targetfile3");
argv = new String[] { "-cp", "-ptopx", src.toUri().toString(),
argv = new String[] { "-cp", "-ptopx", src.toUri().toString(),
target3.toUri().toString() };
ret = ToolRunner.run(shell, argv);
assertEquals("cp -ptopx is not working", SUCCESS, ret);
@ -2317,6 +2317,77 @@ public class TestDFSShell {
}
}
/* [refs HDFS-5033]
*
* Return a "Permission denied" message instead of "No such file or directory"
* when trying to put/copyFromLocal a source file the user cannot read.
*/
@Test (timeout = 30000)
public void testCopyFromLocalWithPermissionDenied() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.format(true).build();
FsShell shell = null;
FileSystem fs = null;
PrintStream bak = null;
final File localFile = new File(TEST_ROOT_DIR, "testFileWithNoReadPermissions");
final String localfilepath = new Path(localFile.getAbsolutePath()).toUri().toString();
final String testdir = "/tmp/TestDFSShell-CopyFromLocalWithPermissionDenied-"
+ counter.getAndIncrement();
final Path hdfsTestDir = new Path(testdir);
try {
fs = cluster.getFileSystem();
fs.mkdirs(hdfsTestDir);
localFile.createNewFile();
localFile.setReadable(false);
writeFile(fs, new Path(testdir, "testFileForPut"));
shell = new FsShell();
// capture system error messages, adapted from testErrOutPut()
bak = System.err;
ByteArrayOutputStream out = new ByteArrayOutputStream();
PrintStream tmp = new PrintStream(out);
System.setErr(tmp);
// Tests for put
String[] argv = new String[] { "-put", localfilepath, testdir };
int res = ToolRunner.run(shell, argv);
assertEquals("put is working", ERROR, res);
String returned = out.toString();
assertTrue(" outputs Permission denied error message",
(returned.lastIndexOf("Permission denied") != -1));
// Tests for copyFromLocal
argv = new String[] { "-copyFromLocal", localfilepath, testdir };
res = ToolRunner.run(shell, argv);
assertEquals("copyFromLocal -f is working", ERROR, res);
returned = out.toString();
assertTrue(" outputs Permission denied error message",
(returned.lastIndexOf("Permission denied") != -1));
} finally {
if (bak != null) {
System.setErr(bak);
}
if (null != shell)
shell.close();
if (localFile.exists())
localFile.delete();
if (null != fs) {
fs.delete(hdfsTestDir, true);
fs.close();
}
cluster.shutdown();
}
}
// setrep for file and directory.
@Test (timeout = 30000)
public void testSetrep() throws Exception {
@ -2506,7 +2577,7 @@ public class TestDFSShell {
cluster.shutdown();
}
}
@Test (timeout = 30000)
public void testSetXAttrPermission() throws Exception {
UserGroupInformation user = UserGroupInformation.
@ -2517,16 +2588,16 @@ public class TestDFSShell {
final Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
Path p = new Path("/foo");
fs.mkdirs(p);
bak = System.err;
final FsShell fshell = new FsShell(conf);
final ByteArrayOutputStream out = new ByteArrayOutputStream();
System.setErr(new PrintStream(out));
// No permission to write xattr
fs.setPermission(p, new FsPermission((short) 0700));
user.doAs(new PrivilegedExceptionAction<Object>() {
@ -2536,18 +2607,18 @@ public class TestDFSShell {
"-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
assertEquals("Returned should be 1", 1, ret);
String str = out.toString();
assertTrue("Permission denied printed",
assertTrue("Permission denied printed",
str.indexOf("Permission denied") != -1);
out.reset();
return null;
}
});
int ret = ToolRunner.run(fshell, new String[]{
"-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
assertEquals("Returned should be 0", 0, ret);
out.reset();
// No permission to read and remove
fs.setPermission(p, new FsPermission((short) 0750));
user.doAs(new PrivilegedExceptionAction<Object>() {
@ -2560,7 +2631,7 @@ public class TestDFSShell {
String str = out.toString();
assertTrue("Permission denied printed",
str.indexOf("Permission denied") != -1);
out.reset();
// Remove
ret = ToolRunner.run(fshell, new String[]{
"-setfattr", "-x", "user.a1", "/foo"});
@ -2568,7 +2639,7 @@ public class TestDFSShell {
str = out.toString();
assertTrue("Permission denied printed",
str.indexOf("Permission denied") != -1);
out.reset();
return null;
}
});
@ -2683,28 +2754,28 @@ public class TestDFSShell {
}
/**
*
* Test to make sure that user namespace xattrs can be set only if path has
* access and, for sticky directories, only the owner/privileged user can write.
* Trusted namespace xattrs can be set only by privileged users.
*
* As user1: Create a directory (/foo) as user1, chown it to user1 (and
* user1's group), grant rwx to "other".
*
* As user2: Set an xattr (should pass with path access).
*
* As user1: Set an xattr (should pass).
*
* As user2: Read the xattr (should pass). Remove the xattr (should pass with
* path access).
*
* As user1: Read the xattr (should pass). Remove the xattr (should pass).
*
* As user1: Change permissions so that only the owner has path access.
*
* As user2: Set an xattr (should fail with no path access). Remove an
* xattr (should fail with no path access).
*
* As superuser: Set an xattr in the trusted namespace (should pass).
*/
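The shell-level round trip this scenario exercises looks roughly like the following (a sketch; the path and xattr name are illustrative, and each exit code reflects the permission checks described above):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FsShell;
  import org.apache.hadoop.util.ToolRunner;

  public class XAttrShellDemo {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      FsShell shell = new FsShell(conf);
      // Set, read back, and remove a user-namespace xattr on /foo.
      int set = ToolRunner.run(shell, new String[] {
          "-setfattr", "-n", "user.a1", "-v", "1234", "/foo" });
      int get = ToolRunner.run(shell, new String[] {
          "-getfattr", "-n", "user.a1", "/foo" });
      int remove = ToolRunner.run(shell, new String[] {
          "-setfattr", "-x", "user.a1", "/foo" });
      System.out.printf("set=%d get=%d remove=%d%n", set, get, remove);
    }
  }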
@Test (timeout = 30000)
@ -2742,7 +2813,7 @@ public class TestDFSShell {
return null;
}
});
// Test 2. Give access to others
user1.doAs(new PrivilegedExceptionAction<Object>() {
@Override
@ -2809,7 +2880,7 @@ public class TestDFSShell {
return null;
}
});
// Test 7. Change permission to have path access only to owner(user1)
user1.doAs(new PrivilegedExceptionAction<Object>() {
@Override
@ -2822,7 +2893,7 @@ public class TestDFSShell {
return null;
}
});
// Test 8. There should be no permissions to set for
// the non-owning user with no path access.
user2.doAs(new PrivilegedExceptionAction<Object>() {
@ -2839,7 +2910,7 @@ public class TestDFSShell {
return null;
}
});
// Test 9. There should be no permissions to remove for
// the non-owning user with no path access.
user2.doAs(new PrivilegedExceptionAction<Object>() {
@ -2856,7 +2927,7 @@ public class TestDFSShell {
return null;
}
});
// Test 10. Superuser should be allowed to set with trusted namespace
SUPERUSER.doAs(new PrivilegedExceptionAction<Object>() {
@Override