HDFS-5201. NativeIO: consolidate getrlimit into NativeIO#getMemlockLimit. (Contributed by Colin Patrick McCabe)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1523153 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Colin McCabe 2013-09-14 00:05:29 +00:00
parent 40eb94ade3
commit 50af34f778
6 changed files with 44 additions and 73 deletions

View File

@ -272,44 +272,6 @@ public static void munlock(ByteBuffer buffer, long len)
munlock_native(buffer, len);
}
/**
 * Resource limit types copied from &lt;sys/resource.h&gt;.
 *
 * These are the Linux numeric values for the first argument of
 * getrlimit(2)/setrlimit(2); they are not portable across platforms
 * (e.g. FreeBSD numbers some of them differently), so they should only be
 * passed to the native getrlimit wrapper on Linux.
 */
private static class ResourceLimit {
public static final int RLIMIT_CPU = 0;
public static final int RLIMIT_FSIZE = 1;
public static final int RLIMIT_DATA = 2;
public static final int RLIMIT_STACK = 3;
public static final int RLIMIT_CORE = 4;
public static final int RLIMIT_RSS = 5;
public static final int RLIMIT_NPROC = 6;
public static final int RLIMIT_NOFILE = 7;
public static final int RLIMIT_MEMLOCK = 8;  // the only one used here (see getMemlockLimit)
public static final int RLIMIT_AS = 9;
public static final int RLIMIT_LOCKS = 10;
public static final int RLIMIT_SIGPENDING = 11;
public static final int RLIMIT_MSGQUEUE = 12;
public static final int RLIMIT_NICE = 13;
public static final int RLIMIT_RTPRIO = 14;
public static final int RLIMIT_RTTIME = 15;
public static final int RLIMIT_NLIMITS = 16;
}
static native String getrlimit(int limit) throws NativeIOException;
/**
 * Queries the soft RLIMIT_MEMLOCK resource limit: the number of bytes of
 * memory this process may lock (via mlock and friends).
 *
 * See the getrlimit(2) man page for details.
 *
 * @return the soft limit on locked memory, in bytes
 * @throws IOException if the native call fails
 */
public static long getMemlockLimit() throws IOException {
  assertCodeLoaded();
  // The native wrapper returns the soft limit formatted as a decimal string.
  final String softLimit = getrlimit(ResourceLimit.RLIMIT_MEMLOCK);
  return Long.parseLong(softLimit);
}
/** Linux only methods used for getOwner() implementation */
private static native long getUIDforFDOwnerforOwner(FileDescriptor fd) throws IOException;
private static native String getUserName(long uid) throws IOException;
@ -563,6 +525,20 @@ public static boolean isAvailable() {
/** Initialize the JNI method ID and class ID cache */
private static native void initNative();
/**
 * Get the maximum number of bytes that can be locked into memory at any
 * given point.
 *
 * @return 0 if no bytes can be locked into memory;
 *         Long.MAX_VALUE if there is no limit;
 *         The number of bytes that can be locked into memory otherwise.
 */
public static long getMemlockLimit() {
  // Without the native library we cannot lock memory at all.
  if (!isAvailable()) {
    return 0;
  }
  return getMemlockLimit0();
}

private static native long getMemlockLimit0();
private static class CachedUid {
final long timestamp;
final String username;

View File

@ -16,8 +16,6 @@
* limitations under the License.
*/
#define _GNU_SOURCE
#include "org_apache_hadoop.h"
#include "org_apache_hadoop_io_nativeio_NativeIO.h"
@ -28,6 +26,7 @@
#include <grp.h>
#include <jni.h>
#include <pwd.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@ -414,36 +413,6 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_munlock_1native(
}
}
/**
 * public static native String getrlimit(
 *     int resource);
 *
 * Returns the soft limit for the given resource, formatted as a decimal
 * string (the Java side parses it with Long.parseLong).
 *
 * The "00024" in the function name is an artifact of how JNI encodes
 * special characters. U+0024 is '$'.
 */
JNIEXPORT jstring JNICALL
Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_getrlimit(
  JNIEnv *env, jclass clazz,
  jint resource)
{
  jstring ret = NULL;
  struct rlimit rlim;
  int rc = getrlimit((int)resource, &rlim);
  if (rc != 0) {
    throw_ioe(env, errno);
    goto cleanup;
  }
  // Convert the soft limit into a string.
  // rlim_cur is rlim_t, typically a 64-bit type, so "%d" would be undefined
  // behavior and 17 bytes could not hold a 20-digit value; print via
  // intmax_t so RLIM_INFINITY (commonly (rlim_t)-1) renders as "-1", which
  // Long.parseLong can still consume.  Also pass the array (decayed to
  // char *), not its address: &limit is a char (*)[N], the wrong type for
  // both snprintf and NewStringUTF.
  char limit[32];
  snprintf(limit, sizeof(limit), "%jd", (intmax_t)rlim.rlim_cur);
  ret = (*env)->NewStringUTF(env, limit);
cleanup:
  return ret;
}
#ifdef __FreeBSD__
static int toFreeBSDFlags(int flags)
{
@ -1008,6 +977,24 @@ done:
#endif
}
/*
 * Native half of NativeIO#getMemlockLimit0: returns the soft RLIMIT_MEMLOCK
 * limit in bytes, 0 on Windows (no mlock equivalent here), and INT64_MAX
 * (Long.MAX_VALUE on the Java side) when the limit is RLIM_INFINITY.
 * On getrlimit failure it raises a Java IOException via throw_ioe and
 * returns 0; the jlong return value is ignored once the exception is pending.
 */
JNIEXPORT jlong JNICALL
Java_org_apache_hadoop_io_nativeio_NativeIO_getMemlockLimit0(
JNIEnv *env, jclass clazz)
{
#ifdef WINDOWS
return 0;
#else
struct rlimit rlim;
int rc = getrlimit(RLIMIT_MEMLOCK, &rlim);
if (rc != 0) {
throw_ioe(env, errno);
return 0;
}
/* Map "unlimited" to the largest value representable in a Java long. */
return (rlim.rlim_cur == RLIM_INFINITY) ?
INT64_MAX : rlim.rlim_cur;
#endif
}
/**
* vim: sw=2: ts=2: et:
*/

View File

@ -583,6 +583,6 @@ public void testMlock() throws Exception {
/**
 * Sanity check that getMemlockLimit can be called without throwing when the
 * native library is available.
 */
@Test(timeout=10000)
public void testGetMemlockLimit() throws Exception {
  assumeTrue(NativeIO.isAvailable());
  // getMemlockLimit moved from NativeIO.POSIX to NativeIO (HDFS-5201);
  // the stale NativeIO.POSIX.getMemlockLimit() call no longer compiles
  // against the consolidated API and has been dropped.
  NativeIO.getMemlockLimit();
}
}

View File

@ -46,3 +46,7 @@ HDFS-4949 (Unreleased)
cache report. (Contributed by Colin Patrick McCabe)
HDFS-5195. Prevent passing null pointer to mlock and munlock. (cnauroth)
HDFS-5201. NativeIO: consolidate getrlimit into NativeIO#getMemlockLimit
(Contributed by Colin Patrick McCabe)

View File

@ -745,7 +745,7 @@ void startDataNode(Configuration conf,
" size (%s) is greater than zero and native code is not available.",
DFS_DATANODE_MAX_LOCKED_MEMORY_KEY));
}
long ulimit = NativeIO.POSIX.getMemlockLimit();
long ulimit = NativeIO.getMemlockLimit();
if (dnConf.maxLockedMemory > ulimit) {
throw new RuntimeException(String.format(
"Cannot start datanode because the configured max locked memory" +

View File

@ -113,11 +113,15 @@ private static String makeURI(String scheme, String host, String path)
@Test(timeout=60000)
public void testMemlockLimit() throws Exception {
assumeTrue(NativeIO.isAvailable());
final long memlockLimit = NativeIO.POSIX.getMemlockLimit();
final long memlockLimit = NativeIO.getMemlockLimit();
Configuration conf = cluster.getConfiguration(0);
// Try starting the DN with limit configured to the ulimit
conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
memlockLimit);
if (memlockLimit == Long.MAX_VALUE) {
// Can't increase the memlock limit past the maximum.
return;
}
DataNode dn = null;
dn = DataNode.createDataNode(new String[]{}, conf);
dn.shutdown();