From 67a3bf92b713163f767baff8afd9c72fa5bcf1b7 Mon Sep 17 00:00:00 2001
From: Michael Stack
Date: Thu, 31 Mar 2011 05:33:28 +0000
Subject: [PATCH] Added note on nproc and changed it so we print ulimit -a
 instead of ulimit -n into our server logs

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1087188 13f79535-47bb-0310-9956-ffa450edef68
---
 bin/hbase-daemon.sh            |  2 +-
 src/docbkx/getting_started.xml | 31 +++++++++++++++++++++----------
 2 files changed, 22 insertions(+), 11 deletions(-)

diff --git a/bin/hbase-daemon.sh b/bin/hbase-daemon.sh
index fcc3e4fa655..4ab37d97a75 100755
--- a/bin/hbase-daemon.sh
+++ b/bin/hbase-daemon.sh
@@ -140,7 +140,7 @@ case $startStop in
     echo starting $command, logging to $logout
     # Add to the command log file vital stats on our environment.
     echo "`date` Starting $command on `hostname`" >> $loglog
-    echo "ulimit -n `ulimit -n`" >> $loglog 2>&1
+    echo "`ulimit -a`" >> $loglog 2>&1
     nohup nice -n $HBASE_NICENESS "$HBASE_HOME"/bin/hbase \
         --config "${HBASE_CONF_DIR}" \
         $command $startStop "$@" > "$logout" 2>&1 < /dev/null &
diff --git a/src/docbkx/getting_started.xml b/src/docbkx/getting_started.xml
index d6a68605a80..d6c21f16585 100644
--- a/src/docbkx/getting_started.xml
+++ b/src/docbkx/getting_started.xml
@@ -319,13 +319,19 @@ stopping hbase...............
         </section>
 
         <section xml:id="ulimit">
-          <title><varname>ulimit</varname><indexterm>
+          <title>
+          <varname>ulimit</varname><indexterm>
               <primary>ulimit</primary>
-            </indexterm></title>
+            </indexterm>
+            and
+            <varname>nproc</varname>
+            <indexterm><primary>nproc</primary>
+            </indexterm>
+          </title>
 
-          <para>HBase is a database, it uses a lot of files at the same time.
-          The default ulimit -n of 1024 on *nix systems is insufficient. Any
-          significant amount of loading will lead you to
+          <para>HBase is a database, it uses a lot of files all at the same time.
+          The default ulimit -n -- i.e. user file limit -- of 1024 on *nix systems
+          is insufficient. Any significant amount of loading will lead you to
           FAQ: Why do I
           see "java.io.IOException...(Too many open files)" in my logs?.
           You may also notice errors such as <programlisting>
@@ -333,9 +339,14 @@
       2010-04-06 03:04:37,542 INFO org.apache.hadoop.hdfs.DFSClient: Abandoning block blk_-6935524980745310745_1391901
       </programlisting> Do yourself a favor and change the upper bound on the
           number of file descriptors. Set it to north of 10k. See the above
-          referenced FAQ for how.</para>
+          referenced FAQ for how. You should also up the hbase users'
+          <varname>nproc</varname> setting; under load, a low-nproc
+          setting could manifest as OutOfMemoryError
+          <footnote><para>See Jack Levin's major hdfs issues
+          note up on the user list.</para></footnote>.
+          </para>
 
-          <para>To be clear, upping the file descriptors for the user who is
+          <para>To be clear, upping the file descriptors and nproc for the user who is
           running the HBase process is an operating system configuration, not
           an HBase configuration. Also, a common mistake is that
           administrators will up the file descriptors for a particular user
@@ -358,12 +369,12 @@ stopping hbase...............
               a line like: <programlisting>hadoop  -       nofile 32768</programlisting>
               Replace hadoop with whatever user is running
               Hadoop and HBase. If you have separate users, you will need 2
-              entries, one for each user.
+              entries, one for each user. In the same file set nproc hard and soft
+              limits. For example: <programlisting>hadoop soft/hard nproc 32000</programlisting>.
               </para>
               <para>In the file <filename>/etc/pam.d/common-session</filename> add as the last line in the file:
               <programlisting>session required  pam_limits.so</programlisting>
-              Otherwise the changes in
-              <filename>/etc/security/limits.conf</filename> won't be
+              Otherwise the changes in <filename>/etc/security/limits.conf</filename> won't be
               applied.</para>
               <para>Don't forget to log out and back in again for the changes to
               take effect!</para>
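
For operators who want to verify the limits that hbase-daemon.sh now records
via `ulimit -a`, here is a minimal bash preflight sketch. It is illustrative
only and not part of the patch: the 10240 and 32000 thresholds are one reading
of the "north of 10k" and nproc figures in the doc text above, and `ulimit -u`
is the shell's view of the nproc (max user processes) limit.

    #!/usr/bin/env bash
    # Preflight check: run as the user that will run HBase (e.g. hadoop).
    # Reports the same limits that hbase-daemon.sh now logs via `ulimit -a`.
    nofile=$(ulimit -n)   # max open file descriptors
    nproc=$(ulimit -u)    # max user processes (the "nproc" limit)
    echo "nofile=$nofile nproc=$nproc"
    if [ "$nofile" != "unlimited" ] && [ "$nofile" -lt 10240 ]; then
      echo "WARN: ulimit -n is $nofile; the doc suggests north of 10k" >&2
    fi
    if [ "$nproc" != "unlimited" ] && [ "$nproc" -lt 32000 ]; then
      echo "WARN: ulimit -u is $nproc; consider raising nproc in limits.conf" >&2
    fi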
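
Similarly, the limits.conf and PAM changes described in the getting_started.xml
hunks could be scripted roughly as below. This is a sketch under assumptions:
it uses the Debian/Ubuntu PAM path from the doc, a single "hadoop" user as in
the example, and expands the doc's soft/hard shorthand into explicit soft and
hard entries. Run as root, then log out and back in for the limits to apply.

    #!/usr/bin/env bash
    # Append nofile and nproc limits for the hadoop user; duplicate the
    # lines for a second user if Hadoop and HBase run as separate accounts.
    {
      echo 'hadoop  -     nofile  32768'
      echo 'hadoop  soft  nproc   32000'
      echo 'hadoop  hard  nproc   32000'
    } >> /etc/security/limits.conf

    # Make PAM apply limits.conf to login sessions, per the doc:
    echo 'session required  pam_limits.so' >> /etc/pam.d/common-session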