HDFS-1249. With fuse-dfs, a chown that supplies only an owner (or only a group) argument fails with Input/output error. Contributed by Colin Patrick McCabe

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1363466 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Eli Collins 2012-07-19 18:37:47 +00:00
parent 7d5d63c13e
commit 5d5a83877a
5 changed files with 59 additions and 25 deletions

View File

@ -349,6 +349,9 @@ Branch-2 ( Unreleased changes )
HDFS-3675. libhdfs: follow documented return codes. (Colin Patrick McCabe via eli)
HDFS-1249. With fuse-dfs, chown which only has owner (or only group)
argument fails with Input/output error. (Colin Patrick McCabe via eli)
OPTIMIZATIONS
HDFS-2982. Startup performance suffers when there are many edit log

View File

@ -40,19 +40,26 @@ int dfs_chown(const char *path, uid_t uid, gid_t gid)
assert(dfs);
assert('/' == *path);
if ((uid == -1) && (gid == -1)) {
ret = 0;
goto cleanup;
}
if (uid != -1) {
user = getUsername(uid);
if (NULL == user) {
ERROR("Could not lookup the user id string %d",(int)uid);
ret = -EIO;
goto cleanup;
}
}
if (gid != -1) {
group = getGroup(gid);
if (group == NULL) {
ERROR("Could not lookup the group id string %d",(int)gid);
ret = -EIO;
goto cleanup;
}
}
userFS = doConnectAsUser(dfs->nn_uri, dfs->nn_port);
if (userFS == NULL) {
@ -62,8 +69,9 @@ int dfs_chown(const char *path, uid_t uid, gid_t gid)
}
if (hdfsChown(userFS, path, user, group)) {
ERROR("Could not chown %s to %d:%d", path, (int)uid, gid);
ret = (errno > 0) ? -errno : -EIO;
ret = errno;
ERROR("Could not chown %s to %d:%d: error %d", path, (int)uid, gid, ret);
ret = (ret > 0) ? -ret : -EIO;
goto cleanup;
}

View File

@ -1811,9 +1811,7 @@ int hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group)
}
if (owner == NULL && group == NULL) {
fprintf(stderr, "Both owner and group cannot be null in chown");
errno = EINVAL;
return -1;
return 0;
}
jobject jFS = (jobject)fs;
@ -1823,8 +1821,8 @@ int hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group)
return -1;
}
jstring jOwnerString = (*env)->NewStringUTF(env, owner);
jstring jGroupString = (*env)->NewStringUTF(env, group);
jstring jOwnerString = owner ? (*env)->NewStringUTF(env, owner) : NULL;
jstring jGroupString = group ? (*env)->NewStringUTF(env, group) : NULL;
//Create the directory
int ret = 0;

View File

@ -556,14 +556,16 @@ extern "C" {
tOffset hdfsGetUsed(hdfsFS fs);
/**
* hdfsChown
* Change the user and/or group of a file or directory.
*
* @param fs The configured filesystem handle.
* @param path the path to the file or directory
* @param owner this is a string in Hadoop land. Set to null or "" if only setting group
* @param group this is a string in Hadoop land. Set to null or "" if only setting user
* @param owner User string. Set to NULL for 'no change'
* @param group Group string. Set to NULL for 'no change'
* @return 0 on success else -1
*/
int hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group);
int hdfsChown(hdfsFS fs, const char* path, const char *owner,
const char *group);
/**
* hdfsChmod

View File

@ -67,6 +67,7 @@ static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs)
char prefix[256], tmp[256];
hdfsFile file;
int ret, expected;
hdfsFileInfo *fileInfo;
snprintf(prefix, sizeof(prefix), "/tlhData%04d", ti->threadIdx);
@ -120,6 +121,28 @@ static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs)
// TODO: Non-recursive delete should fail?
//EXPECT_NONZERO(hdfsDelete(fs, prefix, 0));
snprintf(tmp, sizeof(tmp), "%s/file", prefix);
EXPECT_ZERO(hdfsChown(fs, tmp, NULL, NULL));
EXPECT_ZERO(hdfsChown(fs, tmp, NULL, "doop"));
fileInfo = hdfsGetPathInfo(fs, tmp);
EXPECT_NONNULL(fileInfo);
EXPECT_ZERO(strcmp("doop", fileInfo->mGroup));
hdfsFreeFileInfo(fileInfo, 1);
EXPECT_ZERO(hdfsChown(fs, tmp, "ha", "doop2"));
fileInfo = hdfsGetPathInfo(fs, tmp);
EXPECT_NONNULL(fileInfo);
EXPECT_ZERO(strcmp("ha", fileInfo->mOwner));
EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
hdfsFreeFileInfo(fileInfo, 1);
EXPECT_ZERO(hdfsChown(fs, tmp, "ha2", NULL));
fileInfo = hdfsGetPathInfo(fs, tmp);
EXPECT_NONNULL(fileInfo);
EXPECT_ZERO(strcmp("ha2", fileInfo->mOwner));
EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
hdfsFreeFileInfo(fileInfo, 1);
EXPECT_ZERO(hdfsDelete(fs, prefix, 1));
return 0;
}