HADOOP-10522. JniBasedUnixGroupMapping mishandles errors. Contributed by Kihwal Lee.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1588949 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Kihwal Lee 2014-04-21 18:27:16 +00:00
parent cf61a551f1
commit 360c8d1a18
4 changed files with 86 additions and 39 deletions

View File

@ -423,6 +423,8 @@ Release 2.4.1 - UNRELEASED
HADOOP-10490. TestMapFile and TestBloomMapFile leak file descriptors. HADOOP-10490. TestMapFile and TestBloomMapFile leak file descriptors.
(cnauroth) (cnauroth)
HADOOP-10522. JniBasedUnixGroupMapping mishandles errors. (kihwal)
Release 2.4.0 - 2014-04-07 Release 2.4.0 - 2014-04-07
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES

View File

@ -120,10 +120,18 @@ Java_org_apache_hadoop_security_JniBasedUnixGroupsMapping_getGroupsForUser
goto done; goto done;
} }
ret = hadoop_user_info_fetch(uinfo, username); ret = hadoop_user_info_fetch(uinfo, username);
if (ret) {
if (ret == ENOENT) { if (ret == ENOENT) {
jgroups = (*env)->NewObjectArray(env, 0, g_string_clazz, NULL); jgroups = (*env)->NewObjectArray(env, 0, g_string_clazz, NULL);
} else { // handle other errors
char buf[128];
snprintf(buf, sizeof(buf), "getgrouplist: error looking up user. %d (%s)",
ret, terror(ret));
THROW(env, "java/lang/RuntimeException", buf);
}
goto done; goto done;
} }
ginfo = hadoop_group_info_alloc(); ginfo = hadoop_group_info_alloc();
if (!ginfo) { if (!ginfo) {
THROW(env, "java/lang/OutOfMemoryError", NULL); THROW(env, "java/lang/OutOfMemoryError", NULL);
@ -135,7 +143,7 @@ Java_org_apache_hadoop_security_JniBasedUnixGroupsMapping_getGroupsForUser
THROW(env, "java/lang/OutOfMemoryError", NULL); THROW(env, "java/lang/OutOfMemoryError", NULL);
} else { } else {
char buf[128]; char buf[128];
snprintf(buf, sizeof(buf), "getgrouplist error %d (%s)", snprintf(buf, sizeof(buf), "getgrouplist: error looking up groups. %d (%s)",
ret, terror(ret)); ret, terror(ret));
THROW(env, "java/lang/RuntimeException", buf); THROW(env, "java/lang/RuntimeException", buf);
} }

View File

@ -27,6 +27,8 @@
#include <string.h> #include <string.h>
#include <unistd.h> #include <unistd.h>
#define MAX_GROUP_LOOKUP_TRIES 5
struct hadoop_group_info *hadoop_group_info_alloc(void) struct hadoop_group_info *hadoop_group_info_alloc(void)
{ {
struct hadoop_group_info *ginfo; struct hadoop_group_info *ginfo;
@ -84,23 +86,14 @@ static int getgrgid_error_translate(int err)
int hadoop_group_info_fetch(struct hadoop_group_info *ginfo, gid_t gid) int hadoop_group_info_fetch(struct hadoop_group_info *ginfo, gid_t gid)
{ {
struct group *group; struct group *group;
int err; int ret, i;
size_t buf_sz; size_t buf_sz;
char *nbuf; char *nbuf;
hadoop_group_info_clear(ginfo); hadoop_group_info_clear(ginfo);
for (;;) { for (i = 0, ret = 0; i < MAX_GROUP_LOOKUP_TRIES; i++) {
do { // If the previous call returned ERANGE, increase the buffer size
group = NULL; if (ret == ERANGE) {
err = getgrgid_r(gid, &ginfo->group, ginfo->buf,
ginfo->buf_sz, &group);
} while ((!group) && (err == EINTR));
if (group) {
return 0;
}
if (err != ERANGE) {
return getgrgid_error_translate(errno);
}
buf_sz = ginfo->buf_sz * 2; buf_sz = ginfo->buf_sz * 2;
nbuf = realloc(ginfo->buf, buf_sz); nbuf = realloc(ginfo->buf, buf_sz);
if (!nbuf) { if (!nbuf) {
@ -109,6 +102,32 @@ int hadoop_group_info_fetch(struct hadoop_group_info *ginfo, gid_t gid)
ginfo->buf = nbuf; ginfo->buf = nbuf;
ginfo->buf_sz = buf_sz; ginfo->buf_sz = buf_sz;
} }
// The following call returns errno. Reading the global errno without
// locking is not thread-safe.
group = NULL;
ret = getgrgid_r(gid, &ginfo->group, ginfo->buf,
ginfo->buf_sz, &group);
switch(ret) {
case 0:
if (!group) {
// The underlying library likely has a bug.
return EIO;
}
return 0;
case EINTR:
case ERANGE:
// Retry on these errors.
// EINTR: a signal was handled and this thread was allowed to continue.
// ERANGE: the buffer was not big enough.
break;
default:
// Lookup failed.
return getgrgid_error_translate(ret);
}
}
// Did not succeed after the retries. Return the last error.
return getgrgid_error_translate(ret);
} }
#ifdef GROUP_TESTING #ifdef GROUP_TESTING

View File

@ -28,6 +28,7 @@
#include <unistd.h> #include <unistd.h>
#define INITIAL_GIDS_SIZE 32 #define INITIAL_GIDS_SIZE 32
#define MAX_USER_LOOKUP_TRIES 5
struct hadoop_user_info *hadoop_user_info_alloc(void) struct hadoop_user_info *hadoop_user_info_alloc(void)
{ {
@ -95,23 +96,14 @@ int hadoop_user_info_fetch(struct hadoop_user_info *uinfo,
const char *username) const char *username)
{ {
struct passwd *pwd; struct passwd *pwd;
int err; int ret, i;
size_t buf_sz; size_t buf_sz;
char *nbuf; char *nbuf;
hadoop_user_info_clear(uinfo); hadoop_user_info_clear(uinfo);
for (;;) { for (i = 0, ret = 0; i < MAX_USER_LOOKUP_TRIES; i++) {
do { // If the previous call returned ERANGE, increase the buffer size
pwd = NULL; if (ret == ERANGE) {
err = getpwnam_r(username, &uinfo->pwd, uinfo->buf,
uinfo->buf_sz, &pwd);
} while ((!pwd) && (errno == EINTR));
if (pwd) {
return 0;
}
if (err != ERANGE) {
return getpwnam_error_translate(errno);
}
buf_sz = uinfo->buf_sz * 2; buf_sz = uinfo->buf_sz * 2;
nbuf = realloc(uinfo->buf, buf_sz); nbuf = realloc(uinfo->buf, buf_sz);
if (!nbuf) { if (!nbuf) {
@ -120,6 +112,32 @@ int hadoop_user_info_fetch(struct hadoop_user_info *uinfo,
uinfo->buf = nbuf; uinfo->buf = nbuf;
uinfo->buf_sz = buf_sz; uinfo->buf_sz = buf_sz;
} }
// The following call returns errno. Reading the global errno without
// locking is not thread-safe.
pwd = NULL;
ret = getpwnam_r(username, &uinfo->pwd, uinfo->buf,
uinfo->buf_sz, &pwd);
switch(ret) {
case 0:
if (!pwd) {
// The underlying library likely has a bug.
return EIO;
}
return 0;
case EINTR:
case ERANGE:
// Retry on these errors.
// EINTR: a signal was handled and this thread was allowed to continue.
// ERANGE: the buffer was not big enough.
break;
default:
// Lookup failed.
return getpwnam_error_translate(ret);
}
}
// Did not succeed after the retries. Return the last error.
return getpwnam_error_translate(ret);
} }
static int put_primary_gid_first(struct hadoop_user_info *uinfo) static int put_primary_gid_first(struct hadoop_user_info *uinfo)