HDFS-4139. fuse-dfs RO mode still allows file truncation. Contributed by Colin Patrick McCabe

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1409093 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins 2012-11-14 06:19:20 +00:00
parent 502d6b940d
commit a32639ac33
8 changed files with 15 additions and 24 deletions


@@ -237,6 +237,9 @@ Release 2.0.3-alpha - Unreleased
    HDFS-4171. WebHDFS and HttpFs should accept only valid Unix user names. (tucu)
+   HDFS-4139. fuse-dfs RO mode still allows file truncation.
+   (Colin Patrick McCabe via eli)
Release 2.0.2-alpha - 2012-09-07
  INCOMPATIBLE CHANGES


@@ -31,7 +31,6 @@
//
typedef struct dfs_context_struct {
  int debug;
-  int read_only;
  int usetrash;
  int direct_io;
  char **protectedpaths;


@@ -93,6 +93,18 @@ int main(int argc, char *argv[])
  if (!options.no_permissions) {
    fuse_opt_add_arg(&args, "-odefault_permissions");
  }
+  /*
+   * FUSE already has a built-in parameter for mounting the filesystem as
+   * read-only, -r. We defined our own parameter for doing this called -oro.
+   * We support it by translating it into -r internally.
+   * The kernel intercepts and returns an error message for any "write"
+   * operations that the user attempts to perform on a read-only filesystem.
+   * That means that we don't have to write any code to handle read-only mode.
+   * See HDFS-4139 for more details.
+   */
+  if (options.read_only) {
+    fuse_opt_add_arg(&args, "-r");
+  }
  {
    char buf[80];
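
The comment block added above claims that the kernel, not fuse-dfs, rejects writes once the filesystem is mounted with -r. As a rough illustration only (not part of this patch; the mount point /mnt/hdfs-ro and file name are assumptions), a small standalone C program can check that a truncating open, the operation HDFS-4139 is about, fails with EROFS on such a mount:

/*
 * Minimal sketch, not part of the patch: verify that a fuse-dfs mount made
 * with -oro (translated to FUSE's -r) rejects write operations at the kernel
 * level. The path below is a hypothetical read-only mount point.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
  /* Hypothetical file under a read-only fuse-dfs mount. */
  const char *path = "/mnt/hdfs-ro/some-file";

  /* A truncating open is the kind of write the bug report describes. */
  int fd = open(path, O_WRONLY | O_TRUNC);
  if (fd < 0) {
    /* On a filesystem mounted read-only the kernel fails the call
     * (typically with EROFS) before fuse-dfs ever sees it. */
    printf("open failed as expected: %s\n", strerror(errno));
    return 0;
  }
  printf("unexpected: open succeeded on a read-only mount\n");
  close(fd);
  return 1;
}

Before this change, such an open could slip past the userspace read-only checks, which covered mkdir, rename, rmdir, and unlink but not truncation; with the mount itself read-only, the per-operation checks below become unnecessary and are removed.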


@@ -39,11 +39,6 @@ int dfs_mkdir(const char *path, mode_t mode)
    return -EACCES;
  }
-  if (dfs->read_only) {
-    ERROR("HDFS is configured read-only, cannot create directory %s", path);
-    return -EACCES;
-  }
  ret = fuseConnectAsThreadUid(&conn);
  if (ret) {
    fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "


@@ -43,11 +43,6 @@ int dfs_rename(const char *from, const char *to)
    return -EACCES;
  }
-  if (dfs->read_only) {
-    ERROR("HDFS configured read-only, cannot rename directory %s", from);
-    return -EACCES;
-  }
  ret = fuseConnectAsThreadUid(&conn);
  if (ret) {
    fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "


@@ -44,12 +44,6 @@ int dfs_rmdir(const char *path)
    goto cleanup;
  }
-  if (dfs->read_only) {
-    ERROR("HDFS configured read-only, cannot delete directory %s", path);
-    ret = -EACCES;
-    goto cleanup;
-  }
  ret = fuseConnectAsThreadUid(&conn);
  if (ret) {
    fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "


@@ -40,12 +40,6 @@ int dfs_unlink(const char *path)
    goto cleanup;
  }
-  if (dfs->read_only) {
-    ERROR("HDFS configured read-only, cannot create directory %s", path);
-    ret = -EACCES;
-    goto cleanup;
-  }
  ret = fuseConnectAsThreadUid(&conn);
  if (ret) {
    fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "


@@ -114,7 +114,6 @@ void *dfs_init(void)
  // initialize the context
  dfs->debug = options.debug;
-  dfs->read_only = options.read_only;
  dfs->usetrash = options.usetrash;
  dfs->protectedpaths = NULL;
  dfs->rdbuffer_size = options.rdbuffer_size;