From 151281bfcaedada9a0f6d5a6d7afdf70f83a3dde Mon Sep 17 00:00:00 2001
From: Allen Wittenauer <aw@apache.org>
Date: Wed, 24 Sep 2014 08:35:31 -0700
Subject: [PATCH] HDFS-6534. Fix build on macosx: HDFS parts (Binglin Chang via aw)
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../hadoop-hdfs/src/CMakeLists.txt          | 17 +++++++---
 .../src/main/native/libhdfs/hdfs.c          |  2 +-
 .../native/libhdfs/test/test_libhdfs_ops.c  | 22 ++++++-------
 .../src/main/native/libhdfs/test/vecsum.c   | 33 +++++++++++++++++--
 .../native/libhdfs/test_libhdfs_threaded.c  |  2 +-
 6 files changed, 58 insertions(+), 20 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a9bb227a3f3..ddc143ac5b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -593,6 +593,8 @@ Release 2.6.0 - UNRELEASED
     HDFS-7130. TestDataTransferKeepalive fails intermittently on Windows.
     (cnauroth)
 
+    HDFS-6534. Fix build on macosx: HDFS parts (Binglin Chang via aw)
+
 Release 2.5.1 - 2014-09-05
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
index 854988b9c54..227be45da5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
@@ -211,11 +211,18 @@ if (NOT WIN32)
     add_executable(test_libhdfs_vecsum
         main/native/libhdfs/test/vecsum.c
     )
-    target_link_libraries(test_libhdfs_vecsum
-        hdfs
-        pthread
-        rt
-    )
+    if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
+        target_link_libraries(test_libhdfs_vecsum
+            hdfs
+            pthread
+        )
+    else (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
+        target_link_libraries(test_libhdfs_vecsum
+            hdfs
+            pthread
+            rt
+        )
+    endif (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
 endif(NOT WIN32)
 
 IF(REQUIRE_LIBWEBHDFS)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
index 7c87adfd692..21f9c2ba970 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
@@ -3215,7 +3215,7 @@ static void hdfsFreeFileInfoEntry(hdfsFileInfo *hdfsFileInfo)
     free(hdfsFileInfo->mName);
     free(hdfsFileInfo->mOwner);
     free(hdfsFileInfo->mGroup);
-    memset(hdfsFileInfo, 0, sizeof(hdfsFileInfo));
+    memset(hdfsFileInfo, 0, sizeof(*hdfsFileInfo));
 }
 
 void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries)
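
The hdfs.c hunk above fixes a classic C pitfall: sizeof(hdfsFileInfo) measures the pointer parameter (4 or 8 bytes), not the struct it points to, so the memset zeroed only the first word of each freed entry. A minimal standalone sketch of the bug and the fix follows; the info_t type and its field sizes are this note's own invention, not libhdfs types:

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in for hdfsFileInfo; not a libhdfs type. */
    typedef struct { char name[64]; long size; } info_t;

    int main(void) {
        info_t rec;
        info_t *p = &rec;
        memset(&rec, 0xff, sizeof(rec));   /* fill with junk first */

        memset(p, 0, sizeof(p));           /* BUG: pointer size only */
        printf("after sizeof(p):  name[20] = %d\n", p->name[20]);  /* junk survives */

        memset(p, 0, sizeof(*p));          /* FIX: the whole struct */
        printf("after sizeof(*p): name[20] = %d\n", p->name[20]);  /* now 0 */
        return 0;
    }

Modern compilers flag the buggy form with -Wsizeof-pointer-memaccess, which is worth keeping enabled for code like libhdfs.
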
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c
index a6e1a13abbe..aa9441a0ad8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c
@@ -122,11 +122,11 @@ int main(int argc, char **argv) {
         currentPos = -1;
         if ((currentPos = hdfsTell(fs, writeFile)) == -1) {
             fprintf(stderr,
-                    "Failed to get current file position correctly! Got %ld!\n",
+                    "Failed to get current file position correctly! Got %" PRId64 "!\n",
                     currentPos);
             exit(-1);
         }
-        fprintf(stderr, "Current position: %ld\n", currentPos);
+        fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
 
         if (hdfsFlush(fs, writeFile)) {
             fprintf(stderr, "Failed to 'flush' %s\n", writePath);
@@ -177,11 +177,11 @@ int main(int argc, char **argv) {
         currentPos = -1;
         if((currentPos = hdfsTell(fs, readFile)) != seekPos) {
             fprintf(stderr,
-                    "Failed to get current file position correctly! Got %ld!\n",
+                    "Failed to get current file position correctly! Got %" PRId64 "!\n",
                     currentPos);
             exit(-1);
         }
-        fprintf(stderr, "Current position: %ld\n", currentPos);
+        fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
 
         if (!hdfsFileUsesDirectRead(readFile)) {
             fprintf(stderr, "Direct read support incorrectly not detected "
@@ -283,9 +283,9 @@ int main(int argc, char **argv) {
         fprintf(stderr, "hdfsGetWorkingDirectory: %s\n",
                 ((resp = hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : "Failed!"));
         totalResult += (resp ? 0 : 1);
-        fprintf(stderr, "hdfsGetDefaultBlockSize: %ld\n", hdfsGetDefaultBlockSize(fs));
-        fprintf(stderr, "hdfsGetCapacity: %ld\n", hdfsGetCapacity(fs));
-        fprintf(stderr, "hdfsGetUsed: %ld\n", hdfsGetUsed(fs));
+        fprintf(stderr, "hdfsGetDefaultBlockSize: %" PRId64 "\n", hdfsGetDefaultBlockSize(fs));
+        fprintf(stderr, "hdfsGetCapacity: %" PRId64 "\n", hdfsGetCapacity(fs));
+        fprintf(stderr, "hdfsGetUsed: %" PRId64 "\n", hdfsGetUsed(fs));
 
         fileInfo = NULL;
         if((fileInfo = hdfsGetPathInfo(fs, slashTmp)) != NULL) {
@@ -293,8 +293,8 @@ int main(int argc, char **argv) {
             fprintf(stderr, "Name: %s, ", fileInfo->mName);
             fprintf(stderr, "Type: %c, ", (char)(fileInfo->mKind));
             fprintf(stderr, "Replication: %d, ", fileInfo->mReplication);
-            fprintf(stderr, "BlockSize: %ld, ", fileInfo->mBlockSize);
-            fprintf(stderr, "Size: %ld, ", fileInfo->mSize);
+            fprintf(stderr, "BlockSize: %" PRId64 ", ", fileInfo->mBlockSize);
+            fprintf(stderr, "Size: %" PRId64 ", ", fileInfo->mSize);
             fprintf(stderr, "LastMod: %s", ctime(&fileInfo->mLastMod));
             fprintf(stderr, "Owner: %s, ", fileInfo->mOwner);
             fprintf(stderr, "Group: %s, ", fileInfo->mGroup);
@@ -312,8 +312,8 @@ int main(int argc, char **argv) {
                 fprintf(stderr, "Name: %s, ", fileList[i].mName);
                 fprintf(stderr, "Type: %c, ", (char)fileList[i].mKind);
                 fprintf(stderr, "Replication: %d, ", fileList[i].mReplication);
-                fprintf(stderr, "BlockSize: %ld, ", fileList[i].mBlockSize);
-                fprintf(stderr, "Size: %ld, ", fileList[i].mSize);
+                fprintf(stderr, "BlockSize: %" PRId64 ", ", fileList[i].mBlockSize);
+                fprintf(stderr, "Size: %" PRId64 ", ", fileList[i].mSize);
                 fprintf(stderr, "LastMod: %s", ctime(&fileList[i].mLastMod));
                 fprintf(stderr, "Owner: %s, ", fileList[i].mOwner);
                 fprintf(stderr, "Group: %s, ", fileList[i].mGroup);
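
Every %ld-to-PRId64 change above addresses the same portability problem: hdfsTell(), hdfsGetDefaultBlockSize(), and the hdfsFileInfo size fields are tOffset values, which libhdfs defines as int64_t, but %ld only matches int64_t on LP64 Linux. On OS X int64_t is long long, so the old format strings were undefined behavior and tripped -Wformat. The PRId64 macro from <inttypes.h> expands to whatever specifier the platform's int64_t needs. A self-contained illustration with a demo value (no HDFS connection required):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void) {
        int64_t pos = INT64_C(4294967296);  /* 2^32: truncates if treated as 32-bit */
        /* "%" PRId64 expands to "%lld" on OS X and "%ld" on LP64 Linux,
         * so the same source line prints correctly on both. */
        printf("Current position: %" PRId64 "\n", pos);
        return 0;
    }
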
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/vecsum.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/vecsum.c
index fd18c9db5ee..80a64b4f737 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/vecsum.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/vecsum.c
@@ -29,6 +29,12 @@
 #include
 #include
 
+#ifdef __MACH__ // OS X does not have clock_gettime
+#include <mach/clock.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#endif
+
 #include "config.h"
 #include "hdfs.h"
 
@@ -49,6 +55,29 @@ struct stopwatch {
     struct timespec start;
     struct timespec stop;
 };
 
+
+#ifdef __MACH__
+static int clock_gettime_mono(struct timespec * ts) {
+    static mach_timebase_info_data_t tb;
+    static uint64_t timestart = 0;
+    uint64_t t = 0;
+    if (timestart == 0) {
+        mach_timebase_info(&tb);
+        timestart = mach_absolute_time();
+    }
+    t = mach_absolute_time() - timestart;
+    t *= tb.numer;
+    t /= tb.denom;
+    ts->tv_sec = t / 1000000000ULL;
+    ts->tv_nsec = t - (ts->tv_sec * 1000000000ULL);
+    return 0;
+}
+#else
+static int clock_gettime_mono(struct timespec * ts) {
+    return clock_gettime(CLOCK_MONOTONIC, ts);
+}
+#endif
+
 static struct stopwatch *stopwatch_create(void)
 {
@@ -58,7 +87,7 @@ static struct stopwatch *stopwatch_create(void)
         fprintf(stderr, "failed to allocate memory for stopwatch\n");
         goto error;
     }
-    if (clock_gettime(CLOCK_MONOTONIC, &watch->start)) {
+    if (clock_gettime_mono(&watch->start)) {
         int err = errno;
         fprintf(stderr, "clock_gettime(CLOCK_MONOTONIC) failed with "
             "error %d (%s)\n", err, strerror(err));
@@ -76,7 +105,7 @@ static void stopwatch_stop(struct stopwatch *watch,
 {
     double elapsed, rate;
 
-    if (clock_gettime(CLOCK_MONOTONIC, &watch->stop)) {
+    if (clock_gettime_mono(&watch->stop)) {
         int err = errno;
         fprintf(stderr, "clock_gettime(CLOCK_MONOTONIC) failed with "
             "error %d (%s)\n", err, strerror(err));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c
index 64c1a8f8634..016f0b19dde 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c
@@ -84,7 +84,7 @@ static int hdfsSingleNameNodeConnect(struct NativeMiniDfsCluster *cl, hdfsFS *fs
 
 static int doTestGetDefaultBlockSize(hdfsFS fs, const char *path)
 {
-    uint64_t blockSize;
+    int64_t blockSize;
     int ret;
 
     blockSize = hdfsGetDefaultBlockSize(fs);
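
The vecsum.c hunks are the heart of the portability fix: OS X before 10.12 shipped no clock_gettime(), so the benchmark's timing calls are routed through a clock_gettime_mono() wrapper that, under __MACH__, derives a monotonic timespec from mach_absolute_time() scaled by the mach_timebase_info numerator and denominator. (The mach/*.h header names in that hunk were stripped in transit and are restored above from the APIs the added code calls.) The CMakeLists.txt hunk is the companion piece: clock_gettime lives in librt on older glibc, hence -lrt on Linux, while OS X has no librt to link at all. The sketch below exercises the same shim outside vecsum.c; the main() harness is this note's own scaffolding, not code from the patch:

    /* Standalone exercise of the clock_gettime_mono() technique from the
     * vecsum.c hunk above.  Build: cc mono.c -o mono
     * (append -lrt on glibc older than 2.17). */
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    #ifdef __MACH__ /* OS X before 10.12: no clock_gettime */
    #include <mach/mach_time.h>

    static int clock_gettime_mono(struct timespec *ts) {
        static mach_timebase_info_data_t tb; /* tick-to-ns ratio, queried once */
        static uint64_t timestart = 0;
        uint64_t t = 0;
        if (timestart == 0) {
            mach_timebase_info(&tb);
            timestart = mach_absolute_time();
        }
        t = mach_absolute_time() - timestart;
        t *= tb.numer;   /* convert ticks to nanoseconds */
        t /= tb.denom;
        ts->tv_sec = t / 1000000000ULL;
        ts->tv_nsec = t - (ts->tv_sec * 1000000000ULL);
        return 0;
    }
    #else
    static int clock_gettime_mono(struct timespec *ts) {
        return clock_gettime(CLOCK_MONOTONIC, ts);
    }
    #endif

    int main(void) {
        struct timespec a, b;
        clock_gettime_mono(&a);
        sleep(1);
        clock_gettime_mono(&b);
        /* Difference of two monotonic samples; immune to wall-clock steps. */
        printf("elapsed: %.3f s\n",
               (b.tv_sec - a.tv_sec) + (b.tv_nsec - a.tv_nsec) / 1e9);
        return 0;
    }

The Linux-only -lrt requirement is exactly the asymmetry the CMake change encodes by dropping rt from the link line on Darwin.
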