HDFS-4997. libhdfs does not return correct error code in most cases (cmccabe)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1547637 13f79535-47bb-0310-9956-ffa450edef68
Colin McCabe 2013-12-03 23:13:02 +00:00
parent f7fe50d55f
commit 61c45aae4d
4 changed files with 106 additions and 61 deletions


@@ -767,6 +767,8 @@ Release 2.3.0 - UNRELEASED
     HDFS-5563. NFS gateway should commit the buffered data when read request comes
     after write to the same file (brandonli)
 
+    HDFS-4997. libhdfs doesn't return correct error codes in most cases (cmccabe)
+
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES
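Note: the practical effect of this change is on callers that inspect errno after a failed libhdfs call. A minimal sketch of such a caller (hypothetical code, not part of this commit), assuming an already-connected hdfsFS handle:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include "hdfs.h"

/* Hypothetical caller: with this fix, opening a missing path surfaces
 * ENOENT through errno rather than a catch-all internal error code. */
static int open_for_read(hdfsFS fs, const char *path)
{
    hdfsFile f = hdfsOpenFile(fs, path, O_RDONLY, 0, 0, 0);
    if (!f) {
        fprintf(stderr, "open %s: %s\n", path, strerror(errno));
        return -1;
    }
    return hdfsCloseFile(fs, f);
}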


@@ -39,14 +39,6 @@ struct jsonException {
     const char *message;
 };
 
-static void dotsToSlashes(char *str)
-{
-    for (; *str != '\0'; str++) {
-        if (*str == '.')
-            *str = '/';
-    }
-}
-
 /** Print out the JSON exception information */
 static int printJsonExceptionV(struct jsonException *exc, int noPrintFlags,
                                const char *fmt, va_list ap)
@@ -62,7 +54,6 @@ static int printJsonExceptionV(struct jsonException *exc, int noPrintFlags,
         fprintf(stderr, "printJsonExceptionV: internal out of memory error\n");
         return EINTERNAL;
     }
 
-    dotsToSlashes(javaClassName);
     getExceptionInfo(javaClassName, noPrintFlags, &excErrno, &shouldPrint);
     free(javaClassName);
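For context: the class name handed to getExceptionInfo arrives from the Java side in dotted form (e.g. "java.io.FileNotFoundException"), and the gExceptionInfo table changed below now stores dotted names as well, so the conversion pass is no longer needed. The removed helper, reproduced as a standalone demo for reference:

#include <assert.h>
#include <string.h>

/* The deleted helper: rewrites a dotted class name into the slash form
 * that the old lookup table expected. */
static void dotsToSlashes(char *str)
{
    for (; *str != '\0'; str++) {
        if (*str == '.')
            *str = '/';
    }
}

int main(void)
{
    char name[] = "java.io.FileNotFoundException";
    dotsToSlashes(name);
    assert(strcmp(name, "java/io/FileNotFoundException") == 0);
    return 0;
}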


@@ -35,36 +35,55 @@ struct ExceptionInfo {
 static const struct ExceptionInfo gExceptionInfo[] = {
     {
-        .name = "java/io/FileNotFoundException",
+        .name = "java.io.FileNotFoundException",
         .noPrintFlag = NOPRINT_EXC_FILE_NOT_FOUND,
         .excErrno = ENOENT,
     },
     {
-        .name = "org/apache/hadoop/security/AccessControlException",
+        .name = "org.apache.hadoop.security.AccessControlException",
         .noPrintFlag = NOPRINT_EXC_ACCESS_CONTROL,
         .excErrno = EACCES,
     },
     {
-        .name = "org/apache/hadoop/fs/UnresolvedLinkException",
+        .name = "org.apache.hadoop.fs.UnresolvedLinkException",
         .noPrintFlag = NOPRINT_EXC_UNRESOLVED_LINK,
         .excErrno = ENOLINK,
     },
     {
-        .name = "org/apache/hadoop/fs/ParentNotDirectoryException",
+        .name = "org.apache.hadoop.fs.ParentNotDirectoryException",
         .noPrintFlag = NOPRINT_EXC_PARENT_NOT_DIRECTORY,
         .excErrno = ENOTDIR,
     },
     {
-        .name = "java/lang/IllegalArgumentException",
+        .name = "java.lang.IllegalArgumentException",
         .noPrintFlag = NOPRINT_EXC_ILLEGAL_ARGUMENT,
         .excErrno = EINVAL,
     },
     {
-        .name = "java/lang/OutOfMemoryError",
+        .name = "java.lang.OutOfMemoryError",
         .noPrintFlag = 0,
         .excErrno = ENOMEM,
     },
+    {
+        .name = "org.apache.hadoop.hdfs.server.namenode.SafeModeException",
+        .noPrintFlag = 0,
+        .excErrno = EROFS,
+    },
+    {
+        .name = "org.apache.hadoop.fs.FileAlreadyExistsException",
+        .noPrintFlag = 0,
+        .excErrno = EEXIST,
+    },
+    {
+        .name = "org.apache.hadoop.hdfs.protocol.QuotaExceededException",
+        .noPrintFlag = 0,
+        .excErrno = EDQUOT,
+    },
+    {
+        .name = "org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException",
+        .noPrintFlag = 0,
+        .excErrno = ESTALE,
+    },
 };
 
 void getExceptionInfo(const char *excName, int noPrintFlags,
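For orientation, getExceptionInfo consults the table above to translate a thrown exception's class name into an errno value and a print decision. A sketch of such a lookup, assuming the surrounding file's includes; the actual upstream body may differ in details:

/* Sketch: linear scan of gExceptionInfo; unknown classes fall back to
 * libhdfs' generic EINTERNAL and are always printed. */
void getExceptionInfo(const char *excName, int noPrintFlags,
                      int *excErrno, int *shouldPrint)
{
    size_t i;

    for (i = 0; i < sizeof(gExceptionInfo) / sizeof(gExceptionInfo[0]); i++) {
        if (strcmp(gExceptionInfo[i].name, excName) == 0) {
            *shouldPrint = !(gExceptionInfo[i].noPrintFlag & noPrintFlags);
            *excErrno = gExceptionInfo[i].excErrno;
            return;
        }
    }
    *shouldPrint = 1;
    *excErrno = EINTERNAL;
}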


@@ -48,7 +48,8 @@ struct tlhThreadInfo {
     pthread_t thread;
 };
 
-static int hdfsSingleNameNodeConnect(struct NativeMiniDfsCluster *cl, hdfsFS *fs)
+static int hdfsSingleNameNodeConnect(struct NativeMiniDfsCluster *cl, hdfsFS *fs,
+                                     const char *username)
 {
     int ret, port;
     hdfsFS hdfs;
@@ -70,6 +71,9 @@ static int hdfsSingleNameNodeConnect(struct NativeMiniDfsCluster *cl, hdfsFS *fs
                           TO_STR(TLH_DEFAULT_BLOCK_SIZE));
     hdfsBuilderConfSetStr(bld, "dfs.blocksize",
                           TO_STR(TLH_DEFAULT_BLOCK_SIZE));
+    if (username) {
+        hdfsBuilderSetUserName(bld, username);
+    }
     hdfs = hdfsBuilderConnect(bld);
     if (!hdfs) {
         ret = -errno;
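The new username parameter feeds hdfsBuilderSetUserName so the tests can reconnect as an unprivileged user and check for EACCES. A minimal sketch of the same builder pattern in isolation (hypothetical helper; host and port are assumptions):

#include "hdfs.h"

/* Hypothetical helper: connect to a NameNode, optionally as a named user.
 * Returns NULL on failure with errno set by libhdfs. */
static hdfsFS connect_as(const char *host, tPort port, const char *user)
{
    struct hdfsBuilder *bld = hdfsNewBuilder();
    if (!bld)
        return NULL;
    hdfsBuilderSetNameNode(bld, host);
    hdfsBuilderSetNameNodePort(bld, port);
    if (user)
        hdfsBuilderSetUserName(bld, user);
    return hdfsBuilderConnect(bld);
}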
@@ -110,36 +114,58 @@ static int doTestGetDefaultBlockSize(hdfsFS fs, const char *path)
     return 0;
 }
 
-static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs)
+struct tlhPaths {
+    char prefix[256];
+    char file1[256];
+    char file2[256];
+};
+
+static int setupPaths(const struct tlhThreadInfo *ti, struct tlhPaths *paths)
 {
-    char prefix[256], tmp[256];
+    memset(paths, 0, sizeof(*paths));
+    if (snprintf(paths->prefix, sizeof(paths->prefix), "/tlhData%04d",
+                 ti->threadIdx) >= sizeof(paths->prefix)) {
+        return ENAMETOOLONG;
+    }
+    if (snprintf(paths->file1, sizeof(paths->file1), "%s/file1",
+                 paths->prefix) >= sizeof(paths->file1)) {
+        return ENAMETOOLONG;
+    }
+    if (snprintf(paths->file2, sizeof(paths->file2), "%s/file2",
+                 paths->prefix) >= sizeof(paths->file2)) {
+        return ENAMETOOLONG;
+    }
+    return 0;
+}
+
+static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs,
+                                const struct tlhPaths *paths)
+{
+    char tmp[4096];
     hdfsFile file;
     int ret, expected;
     hdfsFileInfo *fileInfo;
     struct hdfsReadStatistics *readStats = NULL;
 
-    snprintf(prefix, sizeof(prefix), "/tlhData%04d", ti->threadIdx);
-
-    if (hdfsExists(fs, prefix) == 0) {
-        EXPECT_ZERO(hdfsDelete(fs, prefix, 1));
+    if (hdfsExists(fs, paths->prefix) == 0) {
+        EXPECT_ZERO(hdfsDelete(fs, paths->prefix, 1));
     }
-    EXPECT_ZERO(hdfsCreateDirectory(fs, prefix));
-    snprintf(tmp, sizeof(tmp), "%s/file", prefix);
-    EXPECT_ZERO(doTestGetDefaultBlockSize(fs, prefix));
+    EXPECT_ZERO(hdfsCreateDirectory(fs, paths->prefix));
+    EXPECT_ZERO(doTestGetDefaultBlockSize(fs, paths->prefix));
 
     /* There should not be any file to open for reading. */
-    EXPECT_NULL(hdfsOpenFile(fs, tmp, O_RDONLY, 0, 0, 0));
+    EXPECT_NULL(hdfsOpenFile(fs, paths->file1, O_RDONLY, 0, 0, 0));
 
     /* hdfsOpenFile should not accept mode = 3 */
-    EXPECT_NULL(hdfsOpenFile(fs, tmp, 3, 0, 0, 0));
+    EXPECT_NULL(hdfsOpenFile(fs, paths->file1, 3, 0, 0, 0));
 
-    file = hdfsOpenFile(fs, tmp, O_WRONLY, 0, 0, 0);
+    file = hdfsOpenFile(fs, paths->file1, O_WRONLY, 0, 0, 0);
     EXPECT_NONNULL(file);
 
     /* TODO: implement writeFully and use it here */
-    expected = strlen(prefix);
-    ret = hdfsWrite(fs, file, prefix, expected);
+    expected = strlen(paths->prefix);
+    ret = hdfsWrite(fs, file, paths->prefix, expected);
     if (ret < 0) {
         ret = errno;
         fprintf(stderr, "hdfsWrite failed and set errno %d\n", ret);
@@ -155,7 +181,7 @@ static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs)
     EXPECT_ZERO(hdfsCloseFile(fs, file));
 
     /* Let's re-open the file for reading */
-    file = hdfsOpenFile(fs, tmp, O_RDONLY, 0, 0, 0);
+    file = hdfsOpenFile(fs, paths->file1, O_RDONLY, 0, 0, 0);
     EXPECT_NONNULL(file);
 
     EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
@@ -180,60 +206,67 @@ static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs)
     errno = 0;
     EXPECT_INT_EQ(expected, readStats->totalBytesRead);
     hdfsFileFreeReadStatistics(readStats);
-    EXPECT_ZERO(memcmp(prefix, tmp, expected));
+    EXPECT_ZERO(memcmp(paths->prefix, tmp, expected));
     EXPECT_ZERO(hdfsCloseFile(fs, file));
 
     // TODO: Non-recursive delete should fail?
     //EXPECT_NONZERO(hdfsDelete(fs, prefix, 0));
+    EXPECT_ZERO(hdfsCopy(fs, paths->file1, fs, paths->file2));
 
-    snprintf(tmp, sizeof(tmp), "%s/file", prefix);
-    EXPECT_ZERO(hdfsChown(fs, tmp, NULL, NULL));
-    EXPECT_ZERO(hdfsChown(fs, tmp, NULL, "doop"));
-    fileInfo = hdfsGetPathInfo(fs, tmp);
+    EXPECT_ZERO(hdfsChown(fs, paths->file2, NULL, NULL));
+    EXPECT_ZERO(hdfsChown(fs, paths->file2, NULL, "doop"));
+    fileInfo = hdfsGetPathInfo(fs, paths->file2);
     EXPECT_NONNULL(fileInfo);
     EXPECT_ZERO(strcmp("doop", fileInfo->mGroup));
     hdfsFreeFileInfo(fileInfo, 1);
 
-    EXPECT_ZERO(hdfsChown(fs, tmp, "ha", "doop2"));
-    fileInfo = hdfsGetPathInfo(fs, tmp);
+    EXPECT_ZERO(hdfsChown(fs, paths->file2, "ha", "doop2"));
+    fileInfo = hdfsGetPathInfo(fs, paths->file2);
     EXPECT_NONNULL(fileInfo);
     EXPECT_ZERO(strcmp("ha", fileInfo->mOwner));
     EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
     hdfsFreeFileInfo(fileInfo, 1);
 
-    EXPECT_ZERO(hdfsChown(fs, tmp, "ha2", NULL));
-    fileInfo = hdfsGetPathInfo(fs, tmp);
+    EXPECT_ZERO(hdfsChown(fs, paths->file2, "ha2", NULL));
+    fileInfo = hdfsGetPathInfo(fs, paths->file2);
     EXPECT_NONNULL(fileInfo);
     EXPECT_ZERO(strcmp("ha2", fileInfo->mOwner));
     EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
     hdfsFreeFileInfo(fileInfo, 1);
 
-    EXPECT_ZERO(hdfsDelete(fs, prefix, 1));
+    snprintf(tmp, sizeof(tmp), "%s/nonexistent-file-name", paths->prefix);
+    EXPECT_NEGATIVE_ONE_WITH_ERRNO(hdfsChown(fs, tmp, "ha3", NULL), ENOENT);
+    return 0;
+}
+
+static int testHdfsOperationsImpl(struct tlhThreadInfo *ti)
+{
+    hdfsFS fs = NULL;
+    struct tlhPaths paths;
+
+    fprintf(stderr, "testHdfsOperations(threadIdx=%d): starting\n",
+        ti->threadIdx);
+    EXPECT_ZERO(hdfsSingleNameNodeConnect(tlhCluster, &fs, NULL));
+    EXPECT_ZERO(setupPaths(ti, &paths));
+    // test some operations
+    EXPECT_ZERO(doTestHdfsOperations(ti, fs, &paths));
+    EXPECT_ZERO(hdfsDisconnect(fs));
+    // reconnect as user "foo" and verify that we get permission errors
+    EXPECT_ZERO(hdfsSingleNameNodeConnect(tlhCluster, &fs, "foo"));
+    EXPECT_NEGATIVE_ONE_WITH_ERRNO(hdfsChown(fs, paths.file1, "ha3", NULL), EACCES);
+    EXPECT_ZERO(hdfsDisconnect(fs));
+    // reconnect to do the final delete.
+    EXPECT_ZERO(hdfsSingleNameNodeConnect(tlhCluster, &fs, NULL));
+    EXPECT_ZERO(hdfsDelete(fs, paths.prefix, 1));
+    EXPECT_ZERO(hdfsDisconnect(fs));
     return 0;
 }
 
 static void *testHdfsOperations(void *v)
 {
     struct tlhThreadInfo *ti = (struct tlhThreadInfo*)v;
-    hdfsFS fs = NULL;
-    int ret;
-
-    fprintf(stderr, "testHdfsOperations(threadIdx=%d): starting\n",
-        ti->threadIdx);
-    ret = hdfsSingleNameNodeConnect(tlhCluster, &fs);
-    if (ret) {
-        fprintf(stderr, "testHdfsOperations(threadIdx=%d): "
-            "hdfsSingleNameNodeConnect failed with error %d.\n",
-            ti->threadIdx, ret);
-        ti->success = EIO;
-        return NULL;
-    }
-    ti->success = doTestHdfsOperations(ti, fs);
-    if (hdfsDisconnect(fs)) {
-        ret = errno;
-        fprintf(stderr, "hdfsDisconnect error %d\n", ret);
-        ti->success = ret;
-    }
+    int ret = testHdfsOperationsImpl(ti);
+
+    ti->success = ret;
     return NULL;
 }
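The new assertions rely on EXPECT_NEGATIVE_ONE_WITH_ERRNO from the test harness's expect macros. A sketch of what such a macro plausibly expands to (hedged; the upstream definition may differ):

#include <errno.h>
#include <stdio.h>

/* Sketch only: fail unless expr returns -1 with the expected errno. */
#define EXPECT_NEGATIVE_ONE_WITH_ERRNO(expr, e) \
    do { \
        int _rc = (expr); \
        int _err = errno; \
        if (_rc != -1 || _err != (e)) { \
            fprintf(stderr, "%s: expected -1 with errno %d, " \
                "got %d with errno %d\n", #expr, (e), _rc, _err); \
            return -1; \
        } \
    } while (0)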