HDFS-4913. Deleting file through fuse-dfs when using trash fails, requiring root permissions (cmccabe)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1595371 13f79535-47bb-0310-9956-ffa450edef68
Colin McCabe 2014-05-16 22:24:33 +00:00
parent da3992b4e3
commit f25c33b65f
2 changed files with 200 additions and 80 deletions
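
The root cause was that fuse_trash.c hard-coded the trash location as /user/root/.Trash/Current, so the rename into the trash only succeeded when performed as root. The patch replaces that constant with a per-user base path derived from the uid of the FUSE request (the get_trash_base() helper in the fuse_trash.c diff below). The following standalone sketch only illustrates that idea and is not part of the patch: sketch_trash_base() is a hypothetical stand-in that uses getpwuid(3) and getuid() where the real helper uses fuse_get_context()->uid and fuse-dfs's getUsername().

/* Illustrative sketch only, not part of the patch: build a per-user trash
 * base such as "/user/<name>/.Trash/Current" instead of the old hard-coded
 * "/user/root/.Trash/Current".  getpwuid(3) stands in for fuse-dfs's
 * getUsername() helper, and getuid() stands in for the FUSE request uid. */
#define _GNU_SOURCE          /* for asprintf */
#include <pwd.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

static char *sketch_trash_base(uid_t uid)
{
  struct passwd *pw = getpwuid(uid);
  char *base = NULL;

  if (!pw) {
    return NULL;             /* unknown uid; the real code reports EIO */
  }
  if (asprintf(&base, "/user/%s/.Trash/Current", pw->pw_name) < 0) {
    return NULL;             /* allocation failure */
  }
  return base;               /* caller frees */
}

int main(void)
{
  char *base = sketch_trash_base(getuid());
  printf("trash base: %s\n", base ? base : "(error)");
  free(base);
  return 0;
}

Because the trash base now follows the requesting user, the rename into .Trash stays inside a directory that user owns, so no root privileges are needed.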

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -495,6 +495,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6250. Fix test failed in TestBalancerWithNodeGroup.testBalancerWithRackLocality
     (Binglin Chang and Chen He via junping_du)
 
+    HDFS-4913. Deleting file through fuse-dfs when using trash fails requiring
+    root permissions (cmccabe)
+
 Release 2.4.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
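
The fuse_trash.c diff that follows adds a small get_parent_dir() helper whose documented contract is: split an absolute path into a malloc'ed parent directory and last path component, returning EINVAL if the path is not absolute or has no parent (i.e. "/"). The sketch below only illustrates that contract with the same rindex()-based approach; split_path() is a hypothetical stand-in, not the static helper added by the patch.

/* Illustration only: mirrors the splitting contract documented for
 * get_parent_dir() in the diff below.  split_path() is a hypothetical
 * stand-in for the patch's static helper. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>         /* rindex */

static int split_path(const char *abs_path, char **pcomp, char **parent_dir)
{
  char *pdir = strdup(abs_path), *pc, *last_slash;

  if (!pdir) {
    return ENOMEM;
  }
  last_slash = rindex(pdir, '/');
  if (!last_slash) {               /* not an absolute path */
    free(pdir);
    return EINVAL;
  }
  if (last_slash[1] == '\0') {     /* trailing slash: strip it and retry */
    *last_slash = '\0';
    last_slash = rindex(pdir, '/');
    if (!last_slash) {             /* "/" has no parent directory */
      free(pdir);
      return EINVAL;
    }
  }
  pc = strdup(last_slash + 1);
  if (!pc) {
    free(pdir);
    return ENOMEM;
  }
  *last_slash = '\0';
  *pcomp = pc;
  *parent_dir = pdir;
  return 0;
}

int main(void)
{
  char *pcomp, *parent;

  if (split_path("/user/foo/some_file", &pcomp, &parent) == 0) {
    /* prints: parent='/user/foo' pcomp='some_file' */
    printf("parent='%s' pcomp='%s'\n", parent, pcomp);
    free(pcomp);
    free(parent);
  }
  return 0;
}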

hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_trash.c

@@ -16,111 +16,228 @@
  * limitations under the License.
  */
 
 #include <hdfs.h>
+#include <inttypes.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
 #include <strings.h>
 
+#include "fuse_context_handle.h"
 #include "fuse_dfs.h"
 #include "fuse_trash.h"
-#include "fuse_context_handle.h"
-
-const char *const TrashPrefixDir = "/user/root/.Trash";
-const char *const TrashDir = "/user/root/.Trash/Current";
+#include "fuse_users.h"
 
 #define TRASH_RENAME_TRIES  100
+#define ALREADY_IN_TRASH_ERR 9000
+
+/**
+ * Split a path into a parent directory and a base path component.
+ *
+ * @param abs_path      The absolute path.
+ * @param pcomp         (out param) Will be set to the last path component.
+ *                          Malloced.
+ * @param parent_dir    (out param) Will be set to the parent directory.
+ *                          Malloced.
+ *
+ * @return              0 on success.
+ *                      On success, both *pcomp and *parent_dir will contain
+ *                      malloc'ed strings.
+ *                      EINVAL if the path wasn't absolute.
+ *                      EINVAL if there is no parent directory (i.e. abs_path=/)
+ *                      ENOMEM if we ran out of memory.
+ */
+static int get_parent_dir(const char *abs_path, char **pcomp,
+                          char **parent_dir)
+{
+  int ret;
+  char *pdir = NULL, *pc = NULL, *last_slash;
+
+  pdir = strdup(abs_path);
+  if (!pdir) {
+    ret = ENOMEM;
+    goto done;
+  }
+  last_slash = rindex(pdir, '/');
+  if (!last_slash) {
+    ERROR("get_parent_dir(%s): expected absolute path.\n", abs_path);
+    ret = EINVAL;
+    goto done;
+  }
+  if (last_slash[1] == '\0') {
+    *last_slash = '\0';
+    last_slash = rindex(pdir, '/');
+    if (!last_slash) {
+      ERROR("get_parent_dir(%s): there is no parent dir.\n", abs_path);
+      ret = EINVAL;
+      goto done;
+    }
+  }
+  pc = strdup(last_slash + 1);
+  if (!pc) {
+    ret = ENOMEM;
+    goto done;
+  }
+  *last_slash = '\0';
+  ret = 0;
+done:
+  if (ret) {
+    free(pdir);
+    free(pc);
+    return ret;
+  }
+  *pcomp = pc;
+  *parent_dir = pdir;
+  return 0;
+}
+
+/**
+ * Get the base path to the trash. This will depend on the user ID.
+ * For example, a user whose ID maps to 'foo' will get back the path
+ * "/user/foo/.Trash/Current".
+ *
+ * @param trash_base    (out param) the base path to the trash.
+ *                          Malloced.
+ *
+ * @return              0 on success; error code otherwise.
+ */
+static int get_trash_base(char **trash_base)
+{
+  const char * const PREFIX = "/user/";
+  const char * const SUFFIX = "/.Trash/Current";
+  char *user_name = NULL, *base = NULL;
+  uid_t uid = fuse_get_context()->uid;
+  int ret;
+
+  user_name = getUsername(uid);
+  if (!user_name) {
+    ERROR("get_trash_base(): failed to get username for uid %"PRId64"\n",
+          (uint64_t)uid);
+    ret = EIO;
+    goto done;
+  }
+  if (asprintf(&base, "%s%s%s", PREFIX, user_name, SUFFIX) < 0) {
+    base = NULL;
+    ret = ENOMEM;
+    goto done;
+  }
+  ret = 0;
+done:
+  free(user_name);
+  if (ret) {
+    free(base);
+    return ret;
+  }
+  *trash_base = base;
+  return 0;
+}
 
 //
 // NOTE: this function is a c implementation of org.apache.hadoop.fs.Trash.moveToTrash(Path path).
 //
-int move_to_trash(const char *item, hdfsFS userFS) {
-
-  // retrieve dfs specific data
-  dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
-
-  // check params and the context var
-  assert(item);
-  assert(dfs);
-  assert('/' == *item);
-  assert(rindex(item,'/') >= 0);
-
-  char fname[4096]; // or last element of the directory path
-  char parent_dir[4096]; // the directory the fname resides in
-
-  if (strlen(item) > sizeof(fname) - strlen(TrashDir)) {
-    ERROR("Buffer too small to accomodate path of len %d", (int)strlen(item));
-    return -EIO;
-  }
-
-  // separate the file name and the parent directory of the item to be deleted
-  {
-    int length_of_parent_dir = rindex(item, '/') - item ;
-    int length_of_fname = strlen(item) - length_of_parent_dir - 1; // the '/'
-
-    // note - the below strncpys should be safe from overflow because of the check on item's string length above.
-    strncpy(parent_dir, item, length_of_parent_dir);
-    parent_dir[length_of_parent_dir ] = 0;
-    strncpy(fname, item + length_of_parent_dir + 1, strlen(item));
-    fname[length_of_fname + 1] = 0;
-  }
-
-  // create the target trash directory
-  char trash_dir[4096];
-  if (snprintf(trash_dir, sizeof(trash_dir), "%s%s", TrashDir, parent_dir)
-      >= sizeof trash_dir) {
-    ERROR("Move to trash error target not big enough for %s", item);
-    return -EIO;
-  }
+int move_to_trash(const char *abs_path, hdfsFS userFS)
+{
+  int ret;
+  char *pcomp = NULL, *parent_dir = NULL, *trash_base = NULL;
+  char *target_dir = NULL, *target = NULL;
+
+  ret = get_parent_dir(abs_path, &pcomp, &parent_dir);
+  if (ret) {
+    goto done;
+  }
+  ret = get_trash_base(&trash_base);
+  if (ret) {
+    goto done;
+  }
+  if (!strncmp(trash_base, abs_path, strlen(trash_base))) {
+    INFO("move_to_trash(%s): file is already in the trash; deleting.",
+         abs_path);
+    ret = ALREADY_IN_TRASH_ERR;
+    goto done;
+  }
+  fprintf(stderr, "trash_base='%s'\n", trash_base);
+  if (asprintf(&target_dir, "%s%s", trash_base, parent_dir) < 0) {
+    ret = ENOMEM;
+    target_dir = NULL;
+    goto done;
+  }
+  if (asprintf(&target, "%s/%s", target_dir, pcomp) < 0) {
+    ret = ENOMEM;
+    target = NULL;
+    goto done;
+  }
 
   // create the target trash directory in trash (if needed)
-  if ( hdfsExists(userFS, trash_dir)) {
+  if (hdfsExists(userFS, target_dir) != 0) {
     // make the directory to put it in in the Trash - NOTE
     // hdfsCreateDirectory also creates parents, so Current will be created if it does not exist.
-    if (hdfsCreateDirectory(userFS, trash_dir)) {
-      return -EIO;
+    if (hdfsCreateDirectory(userFS, target_dir)) {
+      ret = errno;
+      ERROR("move_to_trash(%s) error: hdfsCreateDirectory(%s) failed with error %d",
+            abs_path, target_dir, ret);
+      goto done;
+    }
+  } else if (hdfsExists(userFS, target) == 0) {
+    // If there is already a file in the trash with this path, append a number.
+    int idx;
+    for (idx = 1; idx < TRASH_RENAME_TRIES; idx++) {
+      free(target);
+      if (asprintf(&target, "%s%s.%d", target_dir, pcomp, idx) < 0) {
+        target = NULL;
+        ret = ENOMEM;
+        goto done;
+      }
+      if (hdfsExists(userFS, target) != 0) {
+        break;
+      }
+    }
+    if (idx == TRASH_RENAME_TRIES) {
+      ERROR("move_to_trash(%s) error: there are already %d files in the trash "
+            "with this name.\n", abs_path, TRASH_RENAME_TRIES);
+      ret = EINVAL;
+      goto done;
     }
   }
-
-  //
-  // if the target path in Trash already exists, then append with
-  // a number. Start from 1.
-  //
-  char target[4096];
-  int j ;
-  if ( snprintf(target, sizeof target,"%s/%s",trash_dir, fname) >= sizeof target) {
-    ERROR("Move to trash error target not big enough for %s", item);
-    return -EIO;
-  }
-
-  // NOTE: this loop differs from the java version by capping the #of tries
-  for (j = 1; ! hdfsExists(userFS, target) && j < TRASH_RENAME_TRIES ; j++) {
-    if (snprintf(target, sizeof target,"%s/%s.%d",trash_dir, fname, j) >= sizeof target) {
-      ERROR("Move to trash error target not big enough for %s", item);
-      return -EIO;
-    }
-  }
-
-  if (hdfsRename(userFS, item, target)) {
-    ERROR("Trying to rename %s to %s", item, target);
-    return -EIO;
-  }
-  return 0;
+  if (hdfsRename(userFS, abs_path, target)) {
+    ret = errno;
+    ERROR("move_to_trash(%s): failed to rename the file to %s: error %d",
+          abs_path, target, ret);
+    goto done;
+  }
+  ret = 0;
+done:
+  if ((ret != 0) && (ret != ALREADY_IN_TRASH_ERR)) {
+    ERROR("move_to_trash(%s) failed with error %d", abs_path, ret);
+  }
+  free(pcomp);
+  free(parent_dir);
+  free(trash_base);
+  free(target_dir);
+  free(target);
+  return ret;
 }
 
-int hdfsDeleteWithTrash(hdfsFS userFS, const char *path, int useTrash) {
-
-  // move the file to the trash if this is enabled and its not actually in the trash.
-  if (useTrash && strncmp(path, TrashPrefixDir, strlen(TrashPrefixDir)) != 0) {
-    int ret= move_to_trash(path, userFS);
-    return ret;
+int hdfsDeleteWithTrash(hdfsFS userFS, const char *path, int useTrash)
+{
+  int tried_to_move_to_trash = 0;
+  if (useTrash) {
+    tried_to_move_to_trash = 1;
+    if (move_to_trash(path, userFS) == 0) {
+      return 0;
+    }
   }
-
   if (hdfsDelete(userFS, path, 1)) {
-    ERROR("Trying to delete the file %s", path);
-    return -EIO;
+    int err = errno;
+    if (err < 0) {
+      err = -err;
+    }
+    ERROR("hdfsDeleteWithTrash(%s): hdfsDelete failed: error %d.",
+          path, err);
+    return -err;
+  }
+  if (tried_to_move_to_trash) {
+    ERROR("hdfsDeleteWithTrash(%s): deleted the file instead.\n", path);
   }
   return 0;
 }
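
For context, here is a hedged usage sketch, not taken from the patch, of how a caller might drive the reworked hdfsDeleteWithTrash() entry point. It assumes fuse_trash.h declares the function and uses libhdfs's hdfsConnect()/hdfsDisconnect(); the "default"/port 0 connection and the example path are illustrative only, since in fuse-dfs the per-user filesystem handle and path come from the FUSE request handlers.

/* Hedged usage sketch, not from the patch: route a delete through
 * hdfsDeleteWithTrash() so the file is moved to the caller's trash when
 * useTrash is nonzero, and only permanently deleted as a fallback. */
#include <stdio.h>
#include <hdfs.h>
#include "fuse_trash.h"      /* assumed to declare hdfsDeleteWithTrash() */

int main(void)
{
  /* "default"/port 0 makes libhdfs use fs.defaultFS from the loaded config. */
  hdfsFS fs = hdfsConnect("default", 0);
  int ret;

  if (!fs) {
    fprintf(stderr, "failed to connect to HDFS\n");
    return 1;
  }
  /* useTrash=1: try move_to_trash() first; fall back to hdfsDelete(). */
  ret = hdfsDeleteWithTrash(fs, "/user/foo/some_file", 1);
  if (ret) {
    fprintf(stderr, "hdfsDeleteWithTrash failed: %d\n", ret);
  }
  hdfsDisconnect(fs);
  return ret ? 1 : 0;
}

Note that with the patch, a file that already lives under the user's trash directory is deleted outright (the ALREADY_IN_TRASH_ERR path), matching the behavior of org.apache.hadoop.fs.Trash.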