svn merge -c 1401321 FIXES: HADOOP-8811. Compile hadoop native library in FreeBSD (Radim Kolar via bobby)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1401323 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Robert Joseph Evans 2012-10-23 15:37:38 +00:00
parent 96cc1ad304
commit a85371a85f
9 changed files with 69 additions and 13 deletions

View File

@ -824,6 +824,9 @@ Release 0.23.5 - UNRELEASED
HADOOP-8906. paths with multiple globs are unreliable. (Daryn Sharp via HADOOP-8906. paths with multiple globs are unreliable. (Daryn Sharp via
jlowe) jlowe)
HADOOP-8811. Compile hadoop native library in FreeBSD (Radim Kolar via
bobby)
Release 0.23.4 - UNRELEASED Release 0.23.4 - UNRELEASED
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES

View File

@ -67,6 +67,9 @@ macro(set_find_shared_library_version LVERS)
IF(${CMAKE_SYSTEM_NAME} MATCHES "Darwin") IF(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
# Mac OS uses .dylib # Mac OS uses .dylib
SET(CMAKE_FIND_LIBRARY_SUFFIXES ".${LVERS}.dylib") SET(CMAKE_FIND_LIBRARY_SUFFIXES ".${LVERS}.dylib")
ELSEIF(${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD")
# FreeBSD always has .so installed.
SET(CMAKE_FIND_LIBRARY_SUFFIXES ".so")
ELSEIF(${CMAKE_SYSTEM_NAME} MATCHES "Windows") ELSEIF(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
# Windows doesn't support finding shared libraries by version. # Windows doesn't support finding shared libraries by version.
ELSE() ELSE()
@ -95,8 +98,10 @@ GET_FILENAME_COMPONENT(HADOOP_ZLIB_LIBRARY ${ZLIB_LIBRARIES} NAME)
INCLUDE(CheckFunctionExists) INCLUDE(CheckFunctionExists)
INCLUDE(CheckCSourceCompiles) INCLUDE(CheckCSourceCompiles)
INCLUDE(CheckLibraryExists)
CHECK_FUNCTION_EXISTS(sync_file_range HAVE_SYNC_FILE_RANGE) CHECK_FUNCTION_EXISTS(sync_file_range HAVE_SYNC_FILE_RANGE)
CHECK_FUNCTION_EXISTS(posix_fadvise HAVE_POSIX_FADVISE) CHECK_FUNCTION_EXISTS(posix_fadvise HAVE_POSIX_FADVISE)
CHECK_LIBRARY_EXISTS(dl dlopen "" NEED_LINK_DL)
SET(STORED_CMAKE_FIND_LIBRARY_SUFFIXES CMAKE_FIND_LIBRARY_SUFFIXES) SET(STORED_CMAKE_FIND_LIBRARY_SUFFIXES CMAKE_FIND_LIBRARY_SUFFIXES)
set_find_shared_library_version("1") set_find_shared_library_version("1")
@ -159,6 +164,9 @@ add_dual_library(hadoop
${D}/util/NativeCrc32.c ${D}/util/NativeCrc32.c
${D}/util/bulk_crc32.c ${D}/util/bulk_crc32.c
) )
if (NEED_LINK_DL)
set(LIB_DL dl)
endif (NEED_LINK_DL)
IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux") IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
# #
@ -171,7 +179,7 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
ENDIF() ENDIF()
target_link_dual_libraries(hadoop target_link_dual_libraries(hadoop
dl ${LIB_DL}
${JAVA_JVM_LIBRARY} ${JAVA_JVM_LIBRARY}
) )
SET(LIBHADOOP_VERSION "1.0.0") SET(LIBHADOOP_VERSION "1.0.0")

View File

@ -43,7 +43,8 @@ public class HardLink {
OS_TYPE_UNIX, OS_TYPE_UNIX,
OS_TYPE_WINXP, OS_TYPE_WINXP,
OS_TYPE_SOLARIS, OS_TYPE_SOLARIS,
OS_TYPE_MAC OS_TYPE_MAC,
OS_TYPE_FREEBSD
} }
public static OSType osType; public static OSType osType;
@ -63,7 +64,7 @@ public class HardLink {
getHardLinkCommand = new HardLinkCGUnix(); getHardLinkCommand = new HardLinkCGUnix();
//override getLinkCountCommand for the particular Unix variant //override getLinkCountCommand for the particular Unix variant
//Linux is already set as the default - {"stat","-c%h", null} //Linux is already set as the default - {"stat","-c%h", null}
if (osType == OSType.OS_TYPE_MAC) { if (osType == OSType.OS_TYPE_MAC || osType == OSType.OS_TYPE_FREEBSD) {
String[] linkCountCmdTemplate = {"/usr/bin/stat","-f%l", null}; String[] linkCountCmdTemplate = {"/usr/bin/stat","-f%l", null};
HardLinkCGUnix.setLinkCountCmdTemplate(linkCountCmdTemplate); HardLinkCGUnix.setLinkCountCmdTemplate(linkCountCmdTemplate);
} else if (osType == OSType.OS_TYPE_SOLARIS) { } else if (osType == OSType.OS_TYPE_SOLARIS) {
@ -95,6 +96,9 @@ public class HardLink {
else if (osName.contains("Mac")) { else if (osName.contains("Mac")) {
return OSType.OS_TYPE_MAC; return OSType.OS_TYPE_MAC;
} }
else if (osName.contains("FreeBSD")) {
return OSType.OS_TYPE_FREEBSD;
}
else { else {
return OSType.OS_TYPE_UNIX; return OSType.OS_TYPE_UNIX;
} }

View File

@ -254,7 +254,11 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_posix_1fadvise(
int err = 0; int err = 0;
if ((err = posix_fadvise(fd, (off_t)offset, (off_t)len, flags))) { if ((err = posix_fadvise(fd, (off_t)offset, (off_t)len, flags))) {
#ifdef __FreeBSD__
throw_ioe(env, errno);
#else
throw_ioe(env, err); throw_ioe(env, err);
#endif
} }
#endif #endif
} }
@ -310,6 +314,22 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_sync_1file_1range(
#endif #endif
} }
#ifdef __FreeBSD__
static int toFreeBSDFlags(int flags)
{
int rc = flags & 03;
if ( flags & 0100 ) rc |= O_CREAT;
if ( flags & 0200 ) rc |= O_EXCL;
if ( flags & 0400 ) rc |= O_NOCTTY;
if ( flags & 01000 ) rc |= O_TRUNC;
if ( flags & 02000 ) rc |= O_APPEND;
if ( flags & 04000 ) rc |= O_NONBLOCK;
if ( flags &010000 ) rc |= O_SYNC;
if ( flags &020000 ) rc |= O_ASYNC;
return rc;
}
#endif
/* /*
* public static native FileDescriptor open(String path, int flags, int mode); * public static native FileDescriptor open(String path, int flags, int mode);
*/ */
@ -318,6 +338,9 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_open(
JNIEnv *env, jclass clazz, jstring j_path, JNIEnv *env, jclass clazz, jstring j_path,
jint flags, jint mode) jint flags, jint mode)
{ {
#ifdef __FreeBSD__
flags = toFreeBSDFlags(flags);
#endif
jobject ret = NULL; jobject ret = NULL;
const char *path = (*env)->GetStringUTFChars(env, j_path, NULL); const char *path = (*env)->GetStringUTFChars(env, j_path, NULL);
@ -399,7 +422,7 @@ err:
* Determine how big a buffer we need for reentrant getpwuid_r and getgrnam_r * Determine how big a buffer we need for reentrant getpwuid_r and getgrnam_r
*/ */
ssize_t get_pw_buflen() { ssize_t get_pw_buflen() {
size_t ret = 0; long ret = 0;
#ifdef _SC_GETPW_R_SIZE_MAX #ifdef _SC_GETPW_R_SIZE_MAX
ret = sysconf(_SC_GETPW_R_SIZE_MAX); ret = sysconf(_SC_GETPW_R_SIZE_MAX);
#endif #endif

View File

@ -46,6 +46,7 @@ JNIEXPORT jobjectArray JNICALL
Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNetgroupJNI Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNetgroupJNI
(JNIEnv *env, jobject jobj, jstring jgroup) { (JNIEnv *env, jobject jobj, jstring jgroup) {
UserList *userListHead = NULL; UserList *userListHead = NULL;
UserList *current = NULL;
int userListSize = 0; int userListSize = 0;
// pointers to free at the end // pointers to free at the end
@ -72,8 +73,10 @@ Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
// was successful or not (as long as it was called we need to call // was successful or not (as long as it was called we need to call
// endnetgrent) // endnetgrent)
setnetgrentCalledFlag = 1; setnetgrentCalledFlag = 1;
#ifndef __FreeBSD__
if(setnetgrent(cgroup) == 1) { if(setnetgrent(cgroup) == 1) {
UserList *current = NULL; #endif
current = NULL;
// three pointers are for host, user, domain, we only care // three pointers are for host, user, domain, we only care
// about user now // about user now
char *p[3]; char *p[3];
@ -87,7 +90,9 @@ Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
userListSize++; userListSize++;
} }
} }
#ifndef __FreeBSD__
} }
#endif
//-------------------------------------------------- //--------------------------------------------------
// build return data (java array) // build return data (java array)
@ -101,7 +106,7 @@ Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
goto END; goto END;
} }
UserList * current = NULL; current = NULL;
// note that the loop iterates over list but also over array (i) // note that the loop iterates over list but also over array (i)
int i = 0; int i = 0;

View File

@ -78,7 +78,7 @@ int getGroupIDList(const char *user, int *ngroups, gid_t **groups) {
*/ */
int getGroupDetails(gid_t group, char **grpBuf) { int getGroupDetails(gid_t group, char **grpBuf) {
struct group * grp = NULL; struct group * grp = NULL;
size_t currBufferSize = sysconf(_SC_GETGR_R_SIZE_MAX); long currBufferSize = sysconf(_SC_GETGR_R_SIZE_MAX);
if (currBufferSize < 1024) { if (currBufferSize < 1024) {
currBufferSize = 1024; currBufferSize = 1024;
} }
@ -123,7 +123,7 @@ int getGroupDetails(gid_t group, char **grpBuf) {
*/ */
int getPW(const char *user, char **pwbuf) { int getPW(const char *user, char **pwbuf) {
struct passwd *pwbufp = NULL; struct passwd *pwbufp = NULL;
size_t currBufferSize = sysconf(_SC_GETPW_R_SIZE_MAX); long currBufferSize = sysconf(_SC_GETPW_R_SIZE_MAX);
if (currBufferSize < 1024) { if (currBufferSize < 1024) {
currBufferSize = 1024; currBufferSize = 1024;
} }

View File

@ -32,7 +32,9 @@
#include "bulk_crc32.h" #include "bulk_crc32.h"
#include "gcc_optimizations.h" #include "gcc_optimizations.h"
#ifndef __FreeBSD__
#define USE_PIPELINED #define USE_PIPELINED
#endif
#define CRC_INITIAL_VAL 0xffffffff #define CRC_INITIAL_VAL 0xffffffff
@ -260,7 +262,7 @@ static uint32_t crc32_zlib_sb8(
// Begin code for SSE4.2 specific hardware support of CRC32C // Begin code for SSE4.2 specific hardware support of CRC32C
/////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////
#if (defined(__amd64__) || defined(__i386)) && defined(__GNUC__) #if (defined(__amd64__) || defined(__i386)) && defined(__GNUC__) && !defined(__FreeBSD__)
# define SSE42_FEATURE_BIT (1 << 20) # define SSE42_FEATURE_BIT (1 << 20)
# define CPUID_FEATURES 1 # define CPUID_FEATURES 1
/** /**

View File

@ -364,8 +364,12 @@ public class TestHardLink {
callCount = createHardLinkMult(src, fileNames, tgt_mult, maxLength); callCount = createHardLinkMult(src, fileNames, tgt_mult, maxLength);
//check the request was completed in exactly two "chunks" //check the request was completed in exactly two "chunks"
assertEquals(2, callCount); assertEquals(2, callCount);
String[] tgt_multNames = tgt_mult.list();
//sort directory listings before comparison
Arrays.sort(fileNames);
Arrays.sort(tgt_multNames);
//and check the results were as expected in the dir tree //and check the results were as expected in the dir tree
assertTrue(Arrays.deepEquals(fileNames, tgt_mult.list())); assertArrayEquals(fileNames, tgt_multNames);
//Test the case where maxlength is too small even for one filename. //Test the case where maxlength is too small even for one filename.
//It should go ahead and try the single files. //It should go ahead and try the single files.
@ -382,8 +386,12 @@ public class TestHardLink {
maxLength); maxLength);
//should go ahead with each of the three single file names //should go ahead with each of the three single file names
assertEquals(3, callCount); assertEquals(3, callCount);
//check the results were as expected in the dir tree tgt_multNames = tgt_mult.list();
assertTrue(Arrays.deepEquals(fileNames, tgt_mult.list())); //sort directory listings before comparison
Arrays.sort(fileNames);
Arrays.sort(tgt_multNames);
//and check the results were as expected in the dir tree
assertArrayEquals(fileNames, tgt_multNames);
} }
/* /*

View File

@ -224,7 +224,10 @@ public class TestNativeIO {
// we should just skip the unit test on machines where we don't // we should just skip the unit test on machines where we don't
// have fadvise support // have fadvise support
assumeTrue(false); assumeTrue(false);
} finally { } catch (NativeIOException nioe) {
// ignore this error as FreeBSD returns EBADF even if length is zero
}
finally {
fis.close(); fis.close();
} }